From 997c7dfe2945594b1d9efda636b0dcab11f8aea9 Mon Sep 17 00:00:00 2001
From: Patrick Decat
Date: Mon, 20 Jun 2022 12:03:46 +0200
Subject: [PATCH] go get -u ./... && go mod tidy && go mod vendor

---
 go.mod | 21 +- go.sum | 614 +- .../github.com/agext/levenshtein/.travis.yml | 7 +- vendor/github.com/agext/levenshtein/README.md | 2 +- .../agext/levenshtein/levenshtein.go | 2 +- .../apparentlymart/go-cidr/cidr/cidr.go | 38 +- .../apparentlymart/go-textseg/LICENSE | 95 - .../go-textseg/textseg/all_tokens.go | 30 - .../go-textseg/textseg/generate.go | 7 - .../go-textseg/textseg/grapheme_clusters.go | 5276 --------------- .../go-textseg/textseg/grapheme_clusters.rl | 132 - .../textseg/grapheme_clusters_table.rl | 1583 ----- .../go-textseg/textseg/tables.go | 5700 ----------------- .../go-textseg/textseg/unicode2ragel.rb | 335 - .../go-textseg/textseg/utf8_seqs.go | 19 - vendor/github.com/fatih/color/.travis.yml | 5 - vendor/github.com/fatih/color/Gopkg.lock | 27 - vendor/github.com/fatih/color/Gopkg.toml | 30 - vendor/github.com/fatih/color/README.md | 29 +- vendor/github.com/fatih/color/color.go | 25 +- vendor/github.com/fatih/color/doc.go | 2 + vendor/github.com/fatih/color/go.mod | 8 + vendor/github.com/fatih/color/go.sum | 9 + vendor/github.com/go-stack/stack/.travis.yml | 15 - vendor/github.com/go-stack/stack/go.mod | 2 + .../golang/protobuf/jsonpb/decode.go | 524 ++ .../golang/protobuf/jsonpb/encode.go | 559 ++ .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../golang/protobuf/proto/registry.go | 10 +- .../protoc-gen-go/descriptor/descriptor.pb.go | 200 - .../github.com/golang/protobuf/ptypes/any.go | 14 + .../github.com/golang/protobuf/ptypes/doc.go | 4 + .../golang/protobuf/ptypes/duration.go | 4 + .../golang/protobuf/ptypes/timestamp.go | 9 + vendor/github.com/golang/snappy/AUTHORS | 3 + vendor/github.com/golang/snappy/CONTRIBUTORS | 4 + vendor/github.com/golang/snappy/decode.go | 87 +- .../github.com/golang/snappy/decode_amd64.go | 14 - .../snappy/decode_arm64.s} | 406 +- .../snappy/decode_asm.go} | 1 + .../github.com/golang/snappy/decode_other.go | 24 +- vendor/github.com/golang/snappy/encode.go | 4 + .../github.com/golang/snappy/encode_arm64.s | 722 +++ .../snappy/{encode_amd64.go => encode_asm.go} | 1 + .../github.com/golang/snappy/encode_other.go | 2 +- .../github.com/google/go-cmp/cmp/compare.go | 19 +- .../google/go-cmp/cmp/export_panic.go | 1 + .../google/go-cmp/cmp/export_unsafe.go | 1 + .../go-cmp/cmp/internal/diff/debug_disable.go | 1 + .../go-cmp/cmp/internal/diff/debug_enable.go | 1 + .../cmp/internal/flags/toolchain_legacy.go | 10 - .../cmp/internal/flags/toolchain_recent.go | 10 - .../google/go-cmp/cmp/internal/value/name.go | 7 + .../cmp/internal/value/pointer_purego.go | 1 + .../cmp/internal/value/pointer_unsafe.go | 1 + vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_compare.go | 5 +- .../google/go-cmp/cmp/report_reflect.go | 13 +- .../google/go-cmp/cmp/report_slices.go | 6 +- .../github.com/hashicorp/errwrap/errwrap.go | 9 + .../github.com/hashicorp/go-hclog/README.md | 12 +- .../hashicorp/go-hclog/colorize_unix.go | 2 + .../hashicorp/go-hclog/colorize_windows.go | 7 +- .../github.com/hashicorp/go-hclog/global.go | 2 + vendor/github.com/hashicorp/go-hclog/go.mod | 10 +- vendor/github.com/hashicorp/go-hclog/go.sum | 36 +- .../hashicorp/go-hclog/interceptlogger.go | 7 +- .../hashicorp/go-hclog/intlogger.go | 64 +- .../github.com/hashicorp/go-hclog/logger.go | 24 + .../github.com/hashicorp/go-hclog/stdlog.go | 23 +-
.../github.com/hashicorp/go-plugin/README.md | 5 +- .../github.com/hashicorp/go-plugin/client.go | 23 +- vendor/github.com/hashicorp/go-plugin/go.mod | 13 +- vendor/github.com/hashicorp/go-plugin/go.sum | 5 - .../hashicorp/go-plugin/grpc_client.go | 9 +- .../hashicorp/go-plugin/process_posix.go | 1 + .../hashicorp/go-plugin/process_windows.go | 1 + .../hashicorp/go-plugin/rpc_server.go | 6 +- .../github.com/hashicorp/go-plugin/server.go | 6 +- vendor/github.com/hashicorp/go-uuid/LICENSE | 2 + .../hashicorp/go-version/CHANGELOG.md | 7 + .../github.com/hashicorp/go-version/README.md | 2 +- .../hashicorp/go-version/constraint.go | 108 +- .../hashicorp/go-version/version.go | 23 +- vendor/github.com/hashicorp/hc-install/go.mod | 4 +- vendor/github.com/hashicorp/hc-install/go.sum | 10 +- .../releasesjson/checksum_downloader.go | 15 +- .../internal/releasesjson/downloader.go | 21 +- .../internal/releasesjson/releases.go | 4 +- .../hashicorp/hc-install/product/consul.go | 6 +- .../github.com/hashicorp/hcl/v2/CHANGELOG.md | 145 + vendor/github.com/hashicorp/hcl/v2/README.md | 16 +- .../github.com/hashicorp/hcl/v2/appveyor.yml | 13 - .../github.com/hashicorp/hcl/v2/diagnostic.go | 4 +- vendor/github.com/hashicorp/hcl/v2/go.mod | 8 +- vendor/github.com/hashicorp/hcl/v2/go.sum | 19 +- .../hashicorp/hcl/v2/hclsyntax/expression.go | 344 +- .../hcl/v2/hclsyntax/expression_template.go | 31 +- .../hashicorp/hcl/v2/hclsyntax/parser.go | 178 +- .../hcl/v2/hclsyntax/parser_template.go | 47 +- .../hashicorp/hcl/v2/hclsyntax/spec.md | 22 +- .../hashicorp/hcl/v2/hclsyntax/structure.go | 15 +- .../hashicorp/hcl/v2/hclsyntax/token.go | 49 +- vendor/github.com/hashicorp/hcl/v2/ops.go | 180 +- .../hashicorp/hcl/v2/pos_scanner.go | 2 +- .../internal/version/version.go | 2 +- .../hashicorp/terraform-exec/tfexec/add.go | 101 - .../hashicorp/terraform-exec/tfexec/cmd.go | 2 +- .../terraform-exec/tfexec/cmd_default.go | 1 + .../terraform-exec/tfexec/cmdstring.go | 12 - .../terraform-exec/tfexec/cmdstring_go112.go | 19 - .../terraform-exec/tfexec/exit_errors.go | 35 +- .../terraform-exec/tfexec/force_unlock.go | 50 + .../hashicorp/terraform-exec/tfexec/graph.go | 85 + .../terraform-exec/tfexec/options.go | 54 +- .../hashicorp/terraform-exec/tfexec/taint.go | 78 + .../terraform-exec/tfexec/terraform.go | 6 +- .../terraform-exec/tfexec/untaint.go | 78 + .../terraform-exec/tfexec/version.go | 5 + .../terraform-exec/tfexec/workspace_delete.go | 81 + .../terraform-exec/tfexec/workspace_show.go | 35 + .../hashicorp/terraform-json/config.go | 3 + .../hashicorp/terraform-json/go.mod | 7 +- .../hashicorp/terraform-json/go.sum | 24 +- .../hashicorp/terraform-json/plan.go | 13 + .../hashicorp/terraform-json/schemas.go | 7 + .../hashicorp/terraform-json/state.go | 28 + .../internal/logging/context.go | 82 + .../internal/logging/doc.go | 2 + .../internal/logging/environment_variables.go | 22 + .../internal/logging/keys.go | 32 + .../internal/logging/protocol.go | 22 + .../internal/logging/protocol_data.go | 108 + .../internal/logging/provider.go | 19 + .../tfprotov5/internal/tfplugin5/generate.sh | 16 - .../internal/tfplugin5/tfplugin5.pb.go | 9 +- .../internal/tfplugin5/tfplugin5_grpc.pb.go | 25 +- .../terraform-plugin-go/tfprotov5/resource.go | 3 + .../terraform-plugin-go/tfprotov5/schema.go | 102 + .../tfprotov5/tf5server/server.go | 584 +- .../tfprotov6/internal/fromproto/resource.go | 6 +- .../tfprotov6/internal/fromproto/schema.go | 4 +- .../tfprotov6/internal/tfplugin6/generate.sh | 16 - .../internal/tfplugin6/tfplugin6.pb.go 
| 684 +- .../internal/tfplugin6/tfplugin6.proto | 49 +- .../internal/tfplugin6/tfplugin6_grpc.pb.go | 4 + .../tfprotov6/internal/toproto/resource.go | 6 +- .../tfprotov6/internal/toproto/schema.go | 4 +- .../terraform-plugin-go/tfprotov6/schema.go | 159 +- .../tfprotov6/tf6server/server.go | 581 +- .../tftypes/attribute_path.go | 6 +- .../terraform-plugin-go/tftypes/diff.go | 3 + .../terraform-plugin-go/tftypes/list.go | 21 +- .../terraform-plugin-go/tftypes/map.go | 17 +- .../terraform-plugin-go/tftypes/object.go | 33 +- .../terraform-plugin-go/tftypes/primitive.go | 61 +- .../terraform-plugin-go/tftypes/set.go | 17 +- .../terraform-plugin-go/tftypes/tuple.go | 21 +- .../terraform-plugin-go/tftypes/type.go | 6 + .../terraform-plugin-go/tftypes/value.go | 15 +- .../terraform-plugin-go/tftypes/value_json.go | 32 +- .../tftypes/value_msgpack.go | 40 +- .../internal/hclogutils/args.go | 36 + .../internal/hclogutils/logger_options.go | 29 + .../internal/logging/log.go | 26 + .../internal/logging/options.go | 52 +- .../internal/logging/provider.go | 29 + .../internal/logging/sdk.go | 29 + .../internal/logging/sink.go | 51 + .../terraform-plugin-log/tflog/options.go | 14 + .../terraform-plugin-log/tflog/provider.go | 56 +- .../terraform-plugin-log/tflog/subsystem.go | 122 +- .../terraform-plugin-log/tfsdklog/options.go | 14 + .../terraform-plugin-log/tfsdklog/sdk.go | 102 +- .../terraform-plugin-log/tfsdklog/sink.go | 39 +- .../tfsdklog/subsystem.go | 122 +- .../v2/helper/acctest/random.go | 4 +- .../v2/helper/logging/logging.go | 13 +- .../v2/helper/logging/transport.go | 2 +- .../helper/resource/environment_variables.go | 32 + .../v2/helper/resource/plugin.go | 172 +- .../v2/helper/resource/state_shim.go | 12 +- .../v2/helper/resource/testcase_providers.go | 56 + .../v2/helper/resource/testcase_validate.go | 85 + .../v2/helper/resource/testing.go | 644 +- .../v2/helper/resource/testing_config.go | 13 +- .../v2/helper/resource/testing_new.go | 280 +- .../v2/helper/resource/testing_new_config.go | 183 +- .../resource/testing_new_import_state.go | 89 +- .../v2/helper/resource/testing_sets.go | 160 +- .../v2/helper/resource/teststep_providers.go | 48 + .../v2/helper/resource/teststep_validate.go | 99 + .../v2/helper/schema/field_reader.go | 2 +- .../v2/helper/schema/field_reader_config.go | 6 +- .../v2/helper/schema/field_reader_diff.go | 4 +- .../v2/helper/schema/field_reader_map.go | 9 +- .../v2/helper/schema/field_writer_map.go | 27 +- .../v2/helper/schema/grpc_provider.go | 264 +- .../v2/helper/schema/provider.go | 30 +- .../v2/helper/schema/resource.go | 557 +- .../v2/helper/schema/resource_data.go | 2 +- .../v2/helper/schema/resource_diff.go | 50 +- .../v2/helper/schema/resource_timeout.go | 8 +- .../v2/helper/schema/schema.go | 403 +- .../v2/helper/schema/set.go | 4 +- .../v2/helper/structure/suppress_json_diff.go | 6 +- .../v2/helper/validation/map.go | 12 +- .../v2/helper/validation/meta.go | 17 +- .../v2/helper/validation/strings.go | 4 +- .../v2/helper/validation/testing.go | 30 - .../v2/helper/validation/time.go | 4 +- .../v2/internal/addrs/module_instance.go | 2 - .../v2/internal/configs/hcl2shim/flatmap.go | 6 +- .../configs/hcl2shim/single_attr_body.go | 85 - .../v2/internal/logging/context.go | 75 + .../internal/logging/environment_variables.go | 24 + .../v2/internal/logging/helper_resource.go | 32 + .../v2/internal/logging/helper_schema.go | 32 + .../v2/internal/logging/keys.go | 45 + .../v2/internal/plugin/convert/diagnostics.go | 58 +- .../v2/internal/plugin/convert/schema.go | 
35 +- .../v2/internal/plugintest/config.go | 22 +- .../plugintest/environment_variables.go | 63 + .../v2/internal/plugintest/guard.go | 47 +- .../v2/internal/plugintest/helper.go | 80 +- .../v2/internal/plugintest/util.go | 27 +- .../v2/internal/plugintest/working_dir.go | 204 +- .../terraform-plugin-sdk/v2/plugin/debug.go | 58 +- .../terraform-plugin-sdk/v2/plugin/serve.go | 182 +- .../terraform-plugin-sdk/v2/terraform/diff.go | 2 +- .../v2/terraform/resource.go | 4 +- .../v2/terraform/state.go | 38 +- .../v2/terraform/state_filter.go | 15 +- .../terraform-registry-address/.go-version | 2 +- .../terraform-registry-address/README.md | 31 +- .../terraform-registry-address/go.mod | 2 +- .../terraform-registry-address/go.sum | 10 +- .../terraform-registry-address/module.go | 241 + .../module_package.go | 87 + vendor/github.com/hashicorp/yamux/addr.go | 2 +- vendor/github.com/hashicorp/yamux/const.go | 27 +- vendor/github.com/hashicorp/yamux/go.mod | 2 + vendor/github.com/hashicorp/yamux/mux.go | 16 + vendor/github.com/hashicorp/yamux/session.go | 33 +- vendor/github.com/hashicorp/yamux/stream.go | 67 +- .../pagerduty/event_orchestration_path.go | 4 +- .../go-pagerduty/pagerduty/references.go | 3 + .../heimweh/go-pagerduty/pagerduty/service.go | 54 +- .../heimweh/go-pagerduty/pagerduty/user.go | 3 - .../pagerduty/webhook_subscription.go | 1 + .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 141 + vendor/github.com/klauspost/compress/LICENSE | 276 + .../github.com/klauspost/compress/README.md | 529 ++ .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/compress.go | 11 +- .../klauspost/compress/fse/decompress.go | 4 +- vendor/github.com/klauspost/compress/gen.sh | 4 + vendor/github.com/klauspost/compress/go.mod | 3 + vendor/github.com/klauspost/compress/go.sum | 0 .../klauspost/compress/huff0/README.md | 4 +- .../klauspost/compress/huff0/bitreader.go | 122 +- .../klauspost/compress/huff0/bitwriter.go | 115 - .../klauspost/compress/huff0/bytereader.go | 10 - .../klauspost/compress/huff0/compress.go | 79 +- .../klauspost/compress/huff0/decompress.go | 999 ++- .../compress/huff0/decompress_amd64.go | 228 + .../compress/huff0/decompress_amd64.s | 865 +++ .../compress/huff0/decompress_generic.go | 295 + .../klauspost/compress/huff0/huff0.go | 64 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../{snappy => internal/snapref}/LICENSE | 0 .../{snappy => internal/snapref}/decode.go | 89 +- .../snapref}/decode_other.go | 6 +- .../{snappy => internal/snapref}/encode.go | 6 +- .../snapref}/encode_other.go | 4 +- .../{snappy => internal/snapref}/snappy.go | 4 +- vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/snappy/.gitignore | 16 - .../klauspost/compress/snappy/AUTHORS | 15 - .../klauspost/compress/snappy/CONTRIBUTORS | 37 - .../klauspost/compress/snappy/README | 107 - .../klauspost/compress/snappy/encode_amd64.go | 29 - .../klauspost/compress/snappy/encode_amd64.s | 730 --- .../klauspost/compress/snappy/runbench.cmd | 2 - .../klauspost/compress/zstd/README.md | 219 +- .../klauspost/compress/zstd/bitreader.go | 12 +- .../klauspost/compress/zstd/bitwriter.go | 98 +- .../klauspost/compress/zstd/blockdec.go | 546 +- .../klauspost/compress/zstd/blockenc.go | 207 +- .../klauspost/compress/zstd/bytebuf.go | 30 +- 
.../klauspost/compress/zstd/bytereader.go | 6 - .../klauspost/compress/zstd/decodeheader.go | 230 + .../klauspost/compress/zstd/decoder.go | 701 +- .../compress/zstd/decoder_options.go | 67 +- .../klauspost/compress/zstd/dict.go | 2 +- .../klauspost/compress/zstd/enc_base.go | 81 +- .../klauspost/compress/zstd/enc_best.go | 558 ++ .../klauspost/compress/zstd/enc_better.go | 738 ++- .../klauspost/compress/zstd/enc_dfast.go | 469 +- .../klauspost/compress/zstd/enc_fast.go | 467 +- .../klauspost/compress/zstd/encoder.go | 125 +- .../compress/zstd/encoder_options.go | 63 +- .../klauspost/compress/zstd/framedec.go | 378 +- .../klauspost/compress/zstd/fse_decoder.go | 65 +- .../klauspost/compress/zstd/fse_encoder.go | 43 +- .../klauspost/compress/zstd/fse_predefined.go | 2 +- .../klauspost/compress/zstd/hash.go | 66 +- .../klauspost/compress/zstd/history.go | 46 +- .../compress/zstd/internal/xxhash/xxhash.go | 1 - .../zstd/internal/xxhash/xxhash_amd64.s | 67 +- .../zstd/internal/xxhash/xxhash_arm64.s | 186 + .../xxhash/{xxhash_amd64.go => xxhash_asm.go} | 5 +- .../zstd/internal/xxhash/xxhash_other.go | 3 +- .../klauspost/compress/zstd/seqdec.go | 318 +- .../klauspost/compress/zstd/seqdec_amd64.go | 362 ++ .../klauspost/compress/zstd/seqdec_amd64.s | 3689 +++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 1 - .../klauspost/compress/zstd/snappy.go | 9 +- .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 62 +- .../github.com/mattn/go-colorable/.travis.yml | 9 - .../github.com/mattn/go-colorable/README.md | 6 +- .../mattn/go-colorable/colorable_appengine.go | 9 + .../mattn/go-colorable/colorable_others.go | 12 +- .../mattn/go-colorable/colorable_windows.go | 70 +- vendor/github.com/mattn/go-colorable/go.mod | 7 +- vendor/github.com/mattn/go-colorable/go.sum | 9 +- .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 12 +- vendor/github.com/mattn/go-isatty/.travis.yml | 13 - vendor/github.com/mattn/go-isatty/README.md | 2 +- vendor/github.com/mattn/go-isatty/go.mod | 4 +- vendor/github.com/mattn/go-isatty/go.sum | 6 +- vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../mattn/go-isatty/isatty_android.go | 23 - .../github.com/mattn/go-isatty/isatty_bsd.go | 13 +- .../mattn/go-isatty/isatty_others.go | 3 +- .../mattn/go-isatty/isatty_plan9.go | 3 +- .../mattn/go-isatty/isatty_solaris.go | 9 +- .../mattn/go-isatty/isatty_tcgets.go | 4 +- .../mattn/go-isatty/isatty_windows.go | 6 +- .../github.com/mitchellh/go-wordwrap/go.mod | 2 + .../mitchellh/go-wordwrap/wordwrap.go | 26 +- .../mitchellh/mapstructure/.travis.yml | 8 - .../mitchellh/mapstructure/CHANGELOG.md | 75 + .../mitchellh/mapstructure/decode_hooks.go | 92 +- .../github.com/mitchellh/mapstructure/go.mod | 2 + .../mitchellh/mapstructure/mapstructure.go | 561 +- vendor/github.com/oklog/run/.travis.yml | 12 - vendor/github.com/oklog/run/README.md | 10 +- vendor/github.com/oklog/run/actors.go | 38 + vendor/github.com/oklog/run/go.mod | 3 + .../vmihailenco/msgpack/v4/.golangci.yml | 12 + .../vmihailenco/msgpack/v4/.travis.yml | 21 + .../vmihailenco/msgpack/v4/CHANGELOG.md | 24 + .../github.com/vmihailenco/msgpack/v4/LICENSE | 25 + .../vmihailenco/msgpack/v4/Makefile | 6 + .../vmihailenco/msgpack/v4/README.md | 72 + .../vmihailenco/msgpack/v4/appengine.go | 64 + .../vmihailenco/msgpack/v4/codes/codes.go | 90 + .../vmihailenco/msgpack/v4/decode.go | 617 ++ .../vmihailenco/msgpack/v4/decode_map.go | 350 + 
.../vmihailenco/msgpack/v4/decode_number.go | 307 + .../vmihailenco/msgpack/v4/decode_query.go | 158 + .../vmihailenco/msgpack/v4/decode_slice.go | 191 + .../vmihailenco/msgpack/v4/decode_string.go | 186 + .../vmihailenco/msgpack/v4/decode_value.go | 276 + .../vmihailenco/msgpack/v4/encode.go | 241 + .../vmihailenco/msgpack/v4/encode_map.go | 172 + .../vmihailenco/msgpack/v4/encode_number.go | 244 + .../vmihailenco/msgpack/v4/encode_slice.go | 131 + .../vmihailenco/msgpack/v4/encode_value.go | 216 + .../github.com/vmihailenco/msgpack/v4/ext.go | 244 + .../github.com/vmihailenco/msgpack/v4/go.mod | 13 + .../github.com/vmihailenco/msgpack/v4/go.sum | 24 + .../vmihailenco/msgpack/v4/intern.go | 236 + .../vmihailenco/msgpack/v4/msgpack.go | 17 + .../github.com/vmihailenco/msgpack/v4/safe.go | 13 + .../github.com/vmihailenco/msgpack/v4/time.go | 149 + .../vmihailenco/msgpack/v4/types.go | 382 ++ .../vmihailenco/msgpack/v4/unsafe.go | 22 + .../vmihailenco/tagparser/.travis.yml | 24 + .../github.com/vmihailenco/tagparser/LICENSE | 25 + .../github.com/vmihailenco/tagparser/Makefile | 8 + .../vmihailenco/tagparser/README.md | 24 + .../github.com/vmihailenco/tagparser/go.mod | 3 + .../tagparser/internal/parser/parser.go | 82 + .../vmihailenco/tagparser/internal/safe.go | 11 + .../vmihailenco/tagparser/internal/unsafe.go | 22 + .../vmihailenco/tagparser/tagparser.go | 181 + vendor/github.com/xdg-go/scram/CHANGELOG.md | 8 + vendor/github.com/xdg-go/scram/doc.go | 6 +- vendor/github.com/xdg-go/scram/go.mod | 2 +- vendor/github.com/xdg-go/scram/go.sum | 8 +- vendor/github.com/xdg-go/scram/scram.go | 5 + .../github.com/xdg-go/stringprep/CHANGELOG.md | 7 + vendor/github.com/xdg-go/stringprep/go.mod | 2 +- vendor/github.com/xdg-go/stringprep/go.sum | 4 +- vendor/github.com/youmark/pkcs8/.travis.yml | 7 +- vendor/github.com/youmark/pkcs8/README.md | 5 +- vendor/github.com/youmark/pkcs8/cipher.go | 60 + .../github.com/youmark/pkcs8/cipher_3des.go | 24 + vendor/github.com/youmark/pkcs8/cipher_aes.go | 84 + vendor/github.com/youmark/pkcs8/go.mod | 5 + vendor/github.com/youmark/pkcs8/go.sum | 7 + vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go | 91 + vendor/github.com/youmark/pkcs8/kdf_scrypt.go | 62 + vendor/github.com/youmark/pkcs8/pkcs8.go | 420 +- .../zclconf/go-cty/cty/primitive_type.go | 45 + .../zclconf/go-cty/cty/value_ops.go | 2 +- .../go.mongodb.org/mongo-driver/bson/bson.go | 2 - .../mongo-driver/bson/bson_1_8.go | 81 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 2 +- .../bson/bsoncodec/default_value_decoders.go | 14 +- .../bson/bsoncodec/default_value_encoders.go | 11 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 19 +- .../mongo-driver/bson/bsoncodec/registry.go | 14 +- .../bson/bsoncodec/struct_codec.go | 11 +- .../mongo-driver/bson/bsoncodec/types.go | 25 - .../bson/bsonrw/extjson_parser.go | 2 +- .../bson/bsonrw/extjson_wrappers.go | 6 +- .../bson/bsonrw/extjson_writer.go | 9 +- .../mongo-driver/bson/bsonrw/value_reader.go | 20 +- .../mongo-driver/bson/bsonrw/value_writer.go | 2 +- .../mongo-driver/bson/bsonrw/writer.go | 24 - .../mongo-driver/bson/bsontype/bsontype.go | 2 + .../go.mongodb.org/mongo-driver/bson/doc.go | 2 +- .../mongo-driver/bson/marshal.go | 25 + .../mongo-driver/bson/primitive/decimal.go | 11 +- .../mongo-driver/bson/primitive/objectid.go | 26 +- .../mongo-driver/bson/primitive/primitive.go | 2 +- .../mongo-driver/bson/primitive_codecs.go | 25 +- .../go.mongodb.org/mongo-driver/bson/raw.go | 7 - .../mongo-driver/bson/registry.go | 2 +- 
.../go.mongodb.org/mongo-driver/bson/types.go | 49 - .../mongo-driver/bson/unmarshal.go | 2 +- .../mongo-driver/event/monitoring.go | 15 +- .../mongo-driver/internal/const.go | 12 +- .../internal/randutil/randutil.go | 77 + .../mongo-driver/internal/semaphore.go | 57 - .../mongo-driver/internal/string_util.go | 2 +- .../internal/uri_validation_errors.go | 4 + .../mongo-driver/mongo/bulk_write.go | 28 +- .../mongo-driver/mongo/change_stream.go | 85 +- .../mongo/change_stream_deployment.go | 5 + .../mongo-driver/mongo/client.go | 47 +- .../mongo-driver/mongo/client_encryption.go | 3 +- .../mongo-driver/mongo/collection.go | 138 +- .../mongo-driver/mongo/crypt_retrievers.go | 6 + .../mongo-driver/mongo/cursor.go | 42 + .../mongo-driver/mongo/database.go | 12 + .../mongo-driver/mongo/description/server.go | 26 +- .../mongo/description/server_selector.go | 26 +- .../go.mongodb.org/mongo-driver/mongo/doc.go | 4 +- .../mongo-driver/mongo/errors.go | 38 + .../mongo-driver/mongo/index_view.go | 3 +- .../mongo-driver/mongo/mongo.go | 44 +- .../mongo-driver/mongo/mongocryptd.go | 6 + .../mongo/options/aggregateoptions.go | 23 +- .../mongo/options/autoencryptionoptions.go | 25 + .../mongo/options/bulkwriteoptions.go | 18 + .../mongo/options/changestreamoptions.go | 34 + .../mongo/options/clientencryptionoptions.go | 89 + .../mongo/options/clientoptions.go | 95 +- .../mongo/options/clientoptions_1_10.go | 13 - .../mongo/options/clientoptions_1_9.go | 20 - .../mongo/options/createcollectionoptions.go | 14 +- .../mongo/options/deleteoptions.go | 15 + .../mongo-driver/mongo/options/findoptions.go | 60 + .../mongo/options/indexoptions.go | 3 + .../mongo/options/listcollectionsoptions.go | 14 + .../mongo/options/listdatabasesoptions.go | 2 +- .../mongo/options/mongooptions.go | 4 +- .../mongo/options/replaceoptions.go | 15 + .../mongo/options/updateoptions.go | 15 + .../mongo-driver/mongo/readpref/options.go | 2 +- .../mongo-driver/mongo/readpref/readpref.go | 3 + .../mongo-driver/mongo/results.go | 26 +- .../mongo-driver/mongo/session.go | 7 +- .../mongo-driver/mongo/single_result.go | 26 + .../mongo/writeconcern/writeconcern.go | 2 +- .../mongo-driver/version/version.go | 2 +- .../x/bsonx/bsoncore/bson_documentbuilder.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 6 +- .../mongo-driver/x/bsonx/bsoncore/value.go | 4 +- .../mongo-driver/x/bsonx/document.go | 2 +- .../mongo-driver/x/bsonx/element.go | 2 - .../mongo-driver/x/bsonx/mdocument.go | 4 +- .../mongo-driver/x/bsonx/primitive_codecs.go | 1 - .../x/bsonx/reflectionfree_d_codec.go | 107 +- .../mongo-driver/x/bsonx/registry.go | 2 +- .../mongo-driver/x/bsonx/value.go | 52 +- .../mongo-driver/x/mongo/driver/DESIGN.md | 4 +- .../mongo-driver/x/mongo/driver/auth/auth.go | 7 +- .../x/mongo/driver/auth/aws_conv.go | 6 +- .../x/mongo/driver/auth/conversation.go | 2 +- .../x/mongo/driver/auth/default.go | 2 +- .../x/mongo/driver/auth/gssapi.go | 5 +- .../x/mongo/driver/auth/gssapi_not_enabled.go | 3 +- .../mongo/driver/auth/gssapi_not_supported.go | 3 +- .../mongo/driver/auth/internal/awsv4/rules.go | 20 +- .../driver/auth/internal/awsv4/signer.go | 4 +- .../mongo/driver/auth/internal/gssapi/gss.go | 5 +- .../mongo/driver/auth/internal/gssapi/sspi.go | 3 +- .../x/mongo/driver/auth/mongodbaws.go | 2 +- .../x/mongo/driver/auth/mongodbcr.go | 19 +- .../mongo-driver/x/mongo/driver/auth/sasl.go | 2 +- .../mongo-driver/x/mongo/driver/auth/util.go | 9 +- .../x/mongo/driver/batch_cursor.go | 28 +- .../mongo-driver/x/mongo/driver/batches.go | 4 - 
.../x/mongo/driver/connstring/connstring.go | 120 +- .../mongo-driver/x/mongo/driver/crypt.go | 78 +- .../mongo-driver/x/mongo/driver/dns/dns.go | 16 +- .../mongo-driver/x/mongo/driver/driver.go | 22 +- .../mongo-driver/x/mongo/driver/errors.go | 29 +- .../x/mongo/driver/mongocrypt/binary.go | 1 + .../x/mongo/driver/mongocrypt/errors.go | 1 + .../driver/mongocrypt/errors_not_enabled.go | 1 + .../x/mongo/driver/mongocrypt/mongocrypt.go | 1 + .../driver/mongocrypt/mongocrypt_context.go | 1 + .../mongocrypt_context_not_enabled.go | 1 + .../mongocrypt/mongocrypt_kms_context.go | 7 + .../mongocrypt_kms_context_not_enabled.go | 11 +- .../mongocrypt/mongocrypt_not_enabled.go | 1 + .../mongo-driver/x/mongo/driver/ocsp/ocsp.go | 100 +- .../mongo-driver/x/mongo/driver/operation.go | 386 +- .../driver/operation/abort_transaction.go | 8 +- .../driver/operation/abort_transaction.toml | 17 - .../x/mongo/driver/operation/aggregate.go | 39 +- .../x/mongo/driver/operation/aggregate.toml | 48 - .../x/mongo/driver/operation/command.go | 14 +- .../driver/operation/commit_transaction.go | 8 +- .../driver/operation/commit_transaction.toml | 22 - .../x/mongo/driver/operation/count.go | 28 +- .../x/mongo/driver/operation/count.toml | 26 - .../x/mongo/driver/operation/create.go | 36 +- .../x/mongo/driver/operation/create.toml | 62 - .../x/mongo/driver/operation/createIndexes.go | 27 +- .../mongo/driver/operation/createIndexes.toml | 42 - .../x/mongo/driver/operation/delete.go | 33 +- .../x/mongo/driver/operation/delete.toml | 47 - .../x/mongo/driver/operation/distinct.go | 13 +- .../x/mongo/driver/operation/distinct.toml | 37 - .../mongo/driver/operation/drop_collection.go | 17 +- .../driver/operation/drop_collection.toml | 21 - .../x/mongo/driver/operation/drop_database.go | 8 +- .../mongo/driver/operation/drop_database.toml | 18 - .../x/mongo/driver/operation/drop_indexes.go | 15 +- .../mongo/driver/operation/drop_indexes.toml | 28 - .../x/mongo/driver/operation/end_sessions.go | 10 +- .../mongo/driver/operation/end_sessions.toml | 16 - .../x/mongo/driver/operation/find.go | 24 +- .../x/mongo/driver/operation/find.toml | 105 - .../mongo/driver/operation/find_and_modify.go | 40 +- .../driver/operation/find_and_modify.toml | 73 - .../operation/{ismaster.go => hello.go} | 212 +- .../x/mongo/driver/operation/insert.go | 19 +- .../x/mongo/driver/operation/insert.toml | 45 - .../x/mongo/driver/operation/listDatabases.go | 61 +- .../mongo/driver/operation/listDatabases.toml | 37 - .../driver/operation/list_collections.go | 51 +- .../driver/operation/list_collections.toml | 23 - .../x/mongo/driver/operation/list_indexes.go | 8 +- .../mongo/driver/operation/list_indexes.toml | 20 - .../x/mongo/driver/operation/operation.go | 15 - .../x/mongo/driver/operation/update.go | 49 +- .../x/mongo/driver/operation/update.toml | 58 - .../x/mongo/driver/operation_legacy.go | 12 +- .../x/mongo/driver/session/client_session.go | 7 +- .../x/mongo/driver/session/server_session.go | 4 - .../x/mongo/driver/topology/connection.go | 171 +- .../connection_legacy_command_metadata.go | 28 - .../driver/topology/connection_options.go | 78 +- .../x/mongo/driver/topology/errors.go | 17 +- .../x/mongo/driver/topology/fsm.go | 20 +- .../driver/topology/hanging_tls_conn_1_16.go | 37 + .../driver/topology/hanging_tls_conn_1_17.go | 44 + .../x/mongo/driver/topology/pool.go | 1304 ++-- .../topology/pool_generation_counter.go | 29 +- .../x/mongo/driver/topology/resource_pool.go | 255 - .../x/mongo/driver/topology/rtt_monitor.go | 186 +- 
.../x/mongo/driver/topology/server.go | 338 +- .../x/mongo/driver/topology/server_options.go | 116 +- .../topology/tls_connection_source_1_16.go | 58 + ...ource.go => tls_connection_source_1_17.go} | 15 +- .../x/mongo/driver/topology/topology.go | 131 +- .../mongo/driver/topology/topology_options.go | 47 +- .../driver/topology/topology_options_1_10.go | 9 - .../driver/topology/topology_options_1_9.go | 13 - .../mongo-driver/x/mongo/driver/uuid/uuid.go | 39 +- .../x/mongo/driver/wiremessage/wiremessage.go | 8 +- .../x/crypto/chacha20/chacha_s390x.go | 1 + .../curve25519/internal/field/fe_amd64.go | 3 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 188 +- .../x/crypto/ed25519/ed25519_go113.go | 74 - .../ed25519/internal/edwards25519/const.go | 1422 ---- .../internal/edwards25519/edwards25519.go | 1793 ------ .../{ => internal}/poly1305/bits_compat.go | 0 .../{ => internal}/poly1305/bits_go1.13.go | 0 .../{ => internal}/poly1305/mac_noasm.go | 0 .../{ => internal}/poly1305/poly1305.go | 2 +- .../{ => internal}/poly1305/sum_amd64.go | 0 .../{ => internal}/poly1305/sum_amd64.s | 0 .../{ => internal}/poly1305/sum_generic.go | 5 +- .../{ => internal}/poly1305/sum_ppc64le.go | 0 .../{ => internal}/poly1305/sum_ppc64le.s | 0 .../{ => internal}/poly1305/sum_s390x.go | 1 + .../{ => internal}/poly1305/sum_s390x.s | 2 +- vendor/golang.org/x/crypto/ocsp/ocsp.go | 5 +- .../x/crypto/openpgp/armor/armor.go | 18 +- .../x/crypto/openpgp/armor/encode.go | 3 +- .../x/crypto/openpgp/elgamal/elgamal.go | 10 +- .../x/crypto/openpgp/errors/errors.go | 6 + .../x/crypto/openpgp/packet/packet.go | 6 + vendor/golang.org/x/crypto/openpgp/read.go | 6 + vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 6 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 + vendor/golang.org/x/crypto/ssh/certs.go | 87 +- vendor/golang.org/x/crypto/ssh/cipher.go | 12 +- vendor/golang.org/x/crypto/ssh/client.go | 18 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 132 +- vendor/golang.org/x/crypto/ssh/common.go | 76 +- vendor/golang.org/x/crypto/ssh/doc.go | 5 +- vendor/golang.org/x/crypto/ssh/handshake.go | 83 +- vendor/golang.org/x/crypto/ssh/kex.go | 186 +- vendor/golang.org/x/crypto/ssh/keys.go | 147 +- vendor/golang.org/x/crypto/ssh/messages.go | 21 +- vendor/golang.org/x/crypto/ssh/server.go | 46 +- vendor/golang.org/x/crypto/ssh/session.go | 1 + vendor/golang.org/x/crypto/ssh/transport.go | 10 +- vendor/golang.org/x/net/context/context.go | 6 +- vendor/golang.org/x/net/context/go17.go | 10 +- vendor/golang.org/x/net/context/pre_go17.go | 10 +- .../golang.org/x/net/http/httpguts/httplex.go | 64 +- vendor/golang.org/x/net/http2/Dockerfile | 2 +- vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ascii.go | 53 + .../x/net/http2/client_conn_pool.go | 123 +- vendor/golang.org/x/net/http2/errors.go | 14 +- vendor/golang.org/x/net/http2/frame.go | 65 +- vendor/golang.org/x/net/http2/go115.go | 27 + vendor/golang.org/x/net/http2/go118.go | 17 + vendor/golang.org/x/net/http2/headermap.go | 7 +- .../golang.org/x/net/http2/hpack/huffman.go | 125 +- vendor/golang.org/x/net/http2/http2.go | 14 +- vendor/golang.org/x/net/http2/not_go115.go | 31 + vendor/golang.org/x/net/http2/not_go118.go | 17 + vendor/golang.org/x/net/http2/pipe.go | 11 + vendor/golang.org/x/net/http2/server.go | 295 +- vendor/golang.org/x/net/http2/transport.go | 1669 +++-- vendor/golang.org/x/net/http2/write.go | 7 +- vendor/golang.org/x/net/http2/writesched.go | 4 +- 
.../x/net/http2/writesched_priority.go | 9 +- .../x/net/http2/writesched_random.go | 6 +- vendor/golang.org/x/net/idna/go118.go | 14 + vendor/golang.org/x/net/idna/idna10.0.0.go | 117 +- vendor/golang.org/x/net/idna/idna9.0.0.go | 97 +- vendor/golang.org/x/net/idna/pre_go118.go | 12 + vendor/golang.org/x/net/idna/punycode.go | 36 +- vendor/golang.org/x/net/idna/trieval.go | 34 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 70 +- .../golang.org/x/sync/semaphore/semaphore.go | 136 - vendor/golang.org/x/sys/cpu/byteorder.go | 1 + vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 4 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 29 +- vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 9 +- vendor/golang.org/x/sys/cpu/cpu_x86.s | 24 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 2 +- vendor/golang.org/x/sys/unix/README.md | 2 +- .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + vendor/golang.org/x/sys/unix/endian_little.go | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 142 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 101 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 26 +- .../golang.org/x/sys/unix/sockcmsg_linux.go | 49 + vendor/golang.org/x/sys/unix/syscall_aix.go | 50 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 59 +- .../golang.org/x/sys/unix/syscall_darwin.go | 150 +- .../x/sys/unix/syscall_dragonfly.go | 21 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 15 +- .../golang.org/x/sys/unix/syscall_illumos.go | 8 + vendor/golang.org/x/sys/unix/syscall_linux.go | 364 +- .../x/sys/unix/syscall_linux_386.go | 54 +- .../x/sys/unix/syscall_linux_alarm.go | 14 + .../x/sys/unix/syscall_linux_amd64.go | 54 +- .../x/sys/unix/syscall_linux_arm.go | 51 +- .../x/sys/unix/syscall_linux_arm64.go | 61 +- .../x/sys/unix/syscall_linux_loong64.go | 226 + .../x/sys/unix/syscall_linux_mips64x.go | 44 +- .../x/sys/unix/syscall_linux_mipsx.go | 44 +- .../x/sys/unix/syscall_linux_ppc.go | 50 +- .../x/sys/unix/syscall_linux_ppc64x.go | 43 +- .../x/sys/unix/syscall_linux_riscv64.go | 57 +- .../x/sys/unix/syscall_linux_s390x.go | 49 +- .../x/sys/unix/syscall_linux_sparc64.go | 42 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 23 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 17 +- .../golang.org/x/sys/unix/syscall_solaris.go | 337 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 55 + .../x/sys/unix/syscall_zos_s390x.go | 22 +- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 + vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 + .../x/sys/unix/sysvshm_unix_other.go | 14 + .../x/sys/unix/zerrors_darwin_amd64.go | 3120 ++++----- .../x/sys/unix/zerrors_darwin_arm64.go | 3120 ++++----- .../x/sys/unix/zerrors_freebsd_386.go | 5 + .../x/sys/unix/zerrors_freebsd_amd64.go | 5 + .../x/sys/unix/zerrors_freebsd_arm.go | 5 + .../x/sys/unix/zerrors_freebsd_arm64.go | 5 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 224 +- .../x/sys/unix/zerrors_linux_386.go | 10 +- .../x/sys/unix/zerrors_linux_amd64.go | 10 +- .../x/sys/unix/zerrors_linux_arm.go | 10 +- .../x/sys/unix/zerrors_linux_arm64.go | 11 +- .../x/sys/unix/zerrors_linux_loong64.go | 818 +++ .../x/sys/unix/zerrors_linux_mips.go | 10 +- .../x/sys/unix/zerrors_linux_mips64.go | 10 +- .../x/sys/unix/zerrors_linux_mips64le.go | 10 +- .../x/sys/unix/zerrors_linux_mipsle.go | 10 +- .../x/sys/unix/zerrors_linux_ppc.go | 10 +- .../x/sys/unix/zerrors_linux_ppc64.go | 10 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 10 +- 
.../x/sys/unix/zerrors_linux_riscv64.go | 10 +- .../x/sys/unix/zerrors_linux_s390x.go | 10 +- .../x/sys/unix/zerrors_linux_sparc64.go | 10 +- .../x/sys/unix/zerrors_openbsd_386.go | 3 + .../x/sys/unix/zerrors_openbsd_arm.go | 3 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 26 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 24 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 20 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 100 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 38 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 100 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 38 +- .../x/sys/unix/zsyscall_freebsd_386.go | 4 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 4 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 4 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 4 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 217 +- .../x/sys/unix/zsyscall_linux_386.go | 54 +- .../x/sys/unix/zsyscall_linux_amd64.go | 88 +- .../x/sys/unix/zsyscall_linux_arm.go | 68 +- .../x/sys/unix/zsyscall_linux_arm64.go | 26 +- .../x/sys/unix/zsyscall_linux_loong64.go | 527 ++ .../x/sys/unix/zsyscall_linux_mips.go | 67 +- .../x/sys/unix/zsyscall_linux_mips64.go | 44 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 47 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 67 +- .../x/sys/unix/zsyscall_linux_ppc.go | 81 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 81 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 81 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 26 +- .../x/sys/unix/zsyscall_linux_s390x.go | 58 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 54 +- .../x/sys/unix/zsyscall_netbsd_386.go | 16 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 16 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 16 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 16 +- .../x/sys/unix/zsyscall_openbsd_386.go | 4 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 4 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 4 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 4 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 4 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 102 +- .../x/sys/unix/zsysnum_linux_386.go | 8 + .../x/sys/unix/zsysnum_linux_amd64.go | 716 ++- .../x/sys/unix/zsysnum_linux_arm.go | 8 + .../x/sys/unix/zsysnum_linux_arm64.go | 606 +- .../x/sys/unix/zsysnum_linux_loong64.go | 311 + .../x/sys/unix/zsysnum_linux_mips.go | 7 + .../x/sys/unix/zsysnum_linux_mips64.go | 701 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 701 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 7 + .../x/sys/unix/zsysnum_linux_ppc.go | 7 + .../x/sys/unix/zsysnum_linux_ppc64.go | 799 +-- .../x/sys/unix/zsysnum_linux_ppc64le.go | 799 +-- .../x/sys/unix/zsysnum_linux_riscv64.go | 604 +- .../x/sys/unix/zsysnum_linux_s390x.go | 729 +-- .../x/sys/unix/zsysnum_linux_sparc64.go | 757 +-- .../x/sys/unix/ztypes_darwin_amd64.go | 294 +- .../x/sys/unix/ztypes_darwin_arm64.go | 294 +- .../x/sys/unix/ztypes_freebsd_386.go | 2 + .../x/sys/unix/ztypes_freebsd_amd64.go | 2 + .../x/sys/unix/ztypes_freebsd_arm.go | 2 + .../x/sys/unix/ztypes_freebsd_arm64.go | 2 + .../x/sys/unix/ztypes_illumos_amd64.go | 2 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 1717 ++++- .../golang.org/x/sys/unix/ztypes_linux_386.go | 60 +- .../x/sys/unix/ztypes_linux_amd64.go | 57 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 60 +- .../x/sys/unix/ztypes_linux_arm64.go | 57 +- .../x/sys/unix/ztypes_linux_loong64.go | 685 ++ .../x/sys/unix/ztypes_linux_mips.go | 59 +- .../x/sys/unix/ztypes_linux_mips64.go | 57 +- .../x/sys/unix/ztypes_linux_mips64le.go | 57 +- .../x/sys/unix/ztypes_linux_mipsle.go | 59 +- 
.../golang.org/x/sys/unix/ztypes_linux_ppc.go | 61 +- .../x/sys/unix/ztypes_linux_ppc64.go | 56 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 56 +- .../x/sys/unix/ztypes_linux_riscv64.go | 57 +- .../x/sys/unix/ztypes_linux_s390x.go | 60 +- .../x/sys/unix/ztypes_linux_sparc64.go | 56 +- .../x/sys/unix/ztypes_openbsd_386.go | 19 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 19 +- .../x/sys/unix/ztypes_openbsd_arm.go | 19 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 19 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 19 +- .../x/sys/unix/ztypes_solaris_amd64.go | 40 + .../x/text/secure/bidirule/bidirule10.0.0.go | 1 + .../x/text/secure/bidirule/bidirule9.0.0.go | 1 + .../x/text/unicode/bidi/tables10.0.0.go | 1 + .../x/text/unicode/bidi/tables11.0.0.go | 1 + .../x/text/unicode/bidi/tables12.0.0.go | 1 + .../x/text/unicode/bidi/tables13.0.0.go | 1 + .../x/text/unicode/bidi/tables9.0.0.go | 1 + .../x/text/unicode/norm/tables10.0.0.go | 1 + .../x/text/unicode/norm/tables11.0.0.go | 1 + .../x/text/unicode/norm/tables12.0.0.go | 1 + .../x/text/unicode/norm/tables13.0.0.go | 1 + .../x/text/unicode/norm/tables9.0.0.go | 1 + .../appengine/datastore/save.go | 4 +- .../googleapis/rpc/status/status.pb.go | 9 +- vendor/google.golang.org/grpc/.travis.yml | 42 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 7 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +- vendor/google.golang.org/grpc/Makefile | 32 +- vendor/google.golang.org/grpc/NOTICE.txt | 13 + vendor/google.golang.org/grpc/README.md | 2 +- vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 80 +- .../grpc/balancer/balancer.go | 114 +- .../grpc/balancer/base/balancer.go | 41 +- .../grpc/balancer/grpclb/state/state.go | 2 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer_conn_wrappers.go | 320 +- .../grpc_binarylog_v1/binarylog.pb.go | 40 +- .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 962 +-- .../grpc/connectivity/connectivity.go | 35 +- .../grpc/credentials/credentials.go | 68 +- .../grpc/credentials/go12.go | 30 - .../grpc/credentials/insecure/insecure.go | 98 + .../google.golang.org/grpc/credentials/tls.go | 3 + vendor/google.golang.org/grpc/dialoptions.go | 123 +- .../grpc/encoding/encoding.go | 2 +- .../grpc/encoding/proto/proto.go | 70 +- vendor/google.golang.org/grpc/go.mod | 20 +- vendor/google.golang.org/grpc/go.sum | 83 +- .../grpc/grpclog/loggerv2.go | 94 +- .../grpc/health/grpc_health_v1/health.pb.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 16 +- vendor/google.golang.org/grpc/install_gae.sh | 6 - vendor/google.golang.org/grpc/interceptor.go | 45 +- .../balancer/gracefulswitch/gracefulswitch.go | 382 ++ .../grpc/internal/binarylog/binarylog.go | 91 +- .../grpc/internal/binarylog/env_config.go | 6 +- .../grpc/internal/binarylog/method_logger.go | 26 +- .../grpc/internal/binarylog/sink.go | 41 +- .../grpc/internal/channelz/funcs.go | 240 +- .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 91 +- .../grpc/internal/channelz/types.go | 23 +- .../grpc/internal/channelz/types_linux.go | 2 - .../grpc/internal/channelz/types_nonlinux.go | 5 +- .../grpc/internal/channelz/util_linux.go | 2 - .../grpc/internal/channelz/util_nonlinux.go | 3 +- .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 2 - .../internal/credentials/spiffe_appengine.go | 31 - .../grpc/internal/credentials/syscallconn.go | 2 - .../grpc/internal/credentials/util.go | 4 +- 
.../grpc/internal/envconfig/envconfig.go | 3 - .../grpc/internal/envconfig/xds.go | 101 + .../grpc/internal/grpclog/grpclog.go | 8 +- .../grpc/internal/grpcrand/grpcrand.go | 29 +- .../grpcutil.go} | 16 +- .../dns/go113.go => grpcutil/regex.go} | 22 +- .../grpc/internal/grpcutil/target.go | 55 - .../grpc/internal/internal.go | 32 +- .../grpc/internal/metadata/metadata.go | 120 + .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 167 + .../internal/resolver/dns/dns_resolver.go | 52 +- .../grpc/internal/resolver/unix/unix.go | 73 + .../internal/serviceconfig/serviceconfig.go | 76 +- .../grpc/internal/status/status.go | 14 +- .../grpc/internal/syscall/syscall_linux.go | 22 +- .../grpc/internal/syscall/syscall_nonlinux.go | 23 +- .../grpc/internal/transport/controlbuf.go | 66 +- .../grpc/internal/transport/flowcontrol.go | 4 +- .../grpc/internal/transport/handler_server.go | 3 +- .../grpc/internal/transport/http2_client.go | 426 +- .../grpc/internal/transport/http2_server.go | 389 +- .../grpc/internal/transport/http_util.go | 250 +- .../transport/networktype/networktype.go | 46 + .../grpc/{ => internal/transport}/proxy.go | 54 +- .../grpc/internal/transport/transport.go | 35 +- .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/metadata/metadata.go | 104 +- .../google.golang.org/grpc/picker_wrapper.go | 12 +- vendor/google.golang.org/grpc/pickfirst.go | 135 +- .../grpc_reflection_v1alpha/reflection.pb.go | 2 +- .../reflection_grpc.pb.go | 16 +- .../grpc/reflection/serverreflection.go | 422 +- vendor/google.golang.org/grpc/regenerate.sh | 72 +- vendor/google.golang.org/grpc/resolver/map.go | 109 + .../grpc/resolver/resolver.go | 64 +- .../grpc/resolver_conn_wrapper.go | 94 +- vendor/google.golang.org/grpc/rpc_util.go | 100 +- vendor/google.golang.org/grpc/server.go | 325 +- .../google.golang.org/grpc/service_config.go | 79 +- vendor/google.golang.org/grpc/stats/stats.go | 11 +- .../google.golang.org/grpc/status/status.go | 32 +- vendor/google.golang.org/grpc/stream.go | 410 +- vendor/google.golang.org/grpc/tap/tap.go | 16 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 79 +- .../protobuf/encoding/protojson/decode.go | 665 ++ .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 344 + .../encoding/protojson/well_known_types.go | 889 +++ .../protobuf/encoding/prototext/decode.go | 33 +- .../protobuf/encoding/prototext/encode.go | 84 +- .../protobuf/encoding/protowire/wire.go | 19 +- .../protobuf/internal/descfmt/stringer.go | 2 + .../protobuf/internal/detrand/rand.go | 8 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 276 + .../encoding/messageset/messageset.go | 33 +- .../protobuf/internal/encoding/tag/tag.go | 2 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/encoding/text/encode.go | 13 +- .../protobuf/internal/errors/is_go112.go | 1 + .../protobuf/internal/errors/is_go113.go | 1 + .../protobuf/internal/fieldsort/fieldsort.go | 40 - .../protobuf/internal/filedesc/build.go | 3 + .../protobuf/internal/filedesc/desc.go | 77 +- .../protobuf/internal/filedesc/desc_lazy.go | 4 +- .../protobuf/internal/filedesc/desc_list.go | 172 +- .../internal/filedesc/desc_list_gen.go | 11 + .../internal/flags/proto_legacy_disable.go | 1 + 
.../internal/flags/proto_legacy_enable.go | 1 + .../protobuf/internal/impl/api_export.go | 2 +- .../protobuf/internal/impl/codec_field.go | 18 +- .../protobuf/internal/impl/codec_gen.go | 974 +-- .../protobuf/internal/impl/codec_map.go | 19 +- .../protobuf/internal/impl/codec_map_go111.go | 1 + .../protobuf/internal/impl/codec_map_go112.go | 1 + .../protobuf/internal/impl/codec_message.go | 68 +- .../internal/impl/codec_messageset.go | 21 +- .../protobuf/internal/impl/codec_reflect.go | 9 +- .../protobuf/internal/impl/codec_unsafe.go | 1 + .../protobuf/internal/impl/convert.go | 29 + .../protobuf/internal/impl/decode.go | 24 +- .../protobuf/internal/impl/encode.go | 10 +- .../protobuf/internal/impl/legacy_export.go | 2 +- .../internal/impl/legacy_extension.go | 3 +- .../protobuf/internal/impl/legacy_message.go | 129 +- .../protobuf/internal/impl/merge.go | 6 +- .../protobuf/internal/impl/message.go | 69 +- .../protobuf/internal/impl/message_reflect.go | 125 +- .../internal/impl/message_reflect_field.go | 85 +- .../protobuf/internal/impl/pointer_reflect.go | 2 + .../protobuf/internal/impl/pointer_unsafe.go | 2 + .../protobuf/internal/mapsort/mapsort.go | 43 - .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/strs/strings_pure.go | 1 + .../protobuf/internal/strs/strings_unsafe.go | 1 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 35 +- .../protobuf/proto/decode_gen.go | 128 +- .../protobuf/proto/encode.go | 55 +- .../google.golang.org/protobuf/proto/equal.go | 25 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 9 + .../protobuf/proto/proto_methods.go | 1 + .../protobuf/proto/proto_reflect.go | 1 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 ++ .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/methods.go | 1 + .../protobuf/reflect/protoreflect/source.go | 84 +- .../reflect/protoreflect/source_gen.go | 461 ++ .../protobuf/reflect/protoreflect/type.go | 34 + .../reflect/protoreflect/value_pure.go | 1 + .../reflect/protoreflect/value_union.go | 25 + .../reflect/protoreflect/value_unsafe.go | 1 + .../reflect/protoregistry/registry.go | 200 +- .../protobuf/runtime/protoiface/methods.go | 1 + .../types/descriptorpb/descriptor.pb.go | 101 +- .../protobuf/types/known/anypb/any.pb.go | 22 +- .../types/known/durationpb/duration.pb.go | 20 +- .../protobuf/types/known/emptypb/empty.pb.go | 16 +- .../types/known/timestamppb/timestamp.pb.go | 29 +- vendor/modules.txt | 137 +- 1012 files changed, 62086 insertions(+), 41290 deletions(-) delete mode 100644 vendor/github.com/apparentlymart/go-textseg/LICENSE delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/generate.go delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/tables.go delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb delete mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go 
delete mode 100644 vendor/github.com/fatih/color/.travis.yml delete mode 100644 vendor/github.com/fatih/color/Gopkg.lock delete mode 100644 vendor/github.com/fatih/color/Gopkg.toml create mode 100644 vendor/github.com/fatih/color/go.mod create mode 100644 vendor/github.com/fatih/color/go.sum delete mode 100644 vendor/github.com/go-stack/stack/.travis.yml create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.go rename vendor/github.com/{klauspost/compress/snappy/decode_amd64.s => golang/snappy/decode_arm64.s} (67%) rename vendor/github.com/{klauspost/compress/snappy/decode_amd64.go => golang/snappy/decode_asm.go} (93%) create mode 100644 vendor/github.com/golang/snappy/encode_arm64.s rename vendor/github.com/golang/snappy/{encode_amd64.go => encode_asm.go} (97%) delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/add.go delete mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go delete mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh create mode 100644 vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go create mode 100644 
vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go create mode 100644 vendor/github.com/hashicorp/terraform-registry-address/module.go create mode 100644 vendor/github.com/hashicorp/terraform-registry-address/module_package.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/go.mod create mode 100644 vendor/github.com/klauspost/compress/go.sum create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/LICENSE (100%) rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/decode.go (85%) rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/decode_other.go (97%) rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/encode.go (98%) rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/encode_other.go (99%) rename vendor/github.com/klauspost/compress/{snappy => internal/snapref}/snappy.go (97%) create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum delete mode 100644 vendor/github.com/klauspost/compress/snappy/.gitignore delete mode 100644 vendor/github.com/klauspost/compress/snappy/AUTHORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/README delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/snappy/runbench.cmd create mode 100644 
vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s rename vendor/github.com/klauspost/compress/zstd/internal/xxhash/{xxhash_amd64.go => xxhash_asm.go} (54%) create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go delete mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_android.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml delete mode 100644 vendor/github.com/oklog/run/.travis.yml create mode 100644 vendor/github.com/oklog/run/actors.go create mode 100644 vendor/github.com/oklog/run/go.mod create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/.travis.yml create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/LICENSE create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/Makefile create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/README.md create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/appengine.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_map.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_number.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_query.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_string.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_value.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_map.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_number.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_value.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/ext.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/go.mod create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/go.sum create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/intern.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/msgpack.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/safe.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/time.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/types.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v4/unsafe.go create mode 100644 vendor/github.com/vmihailenco/tagparser/.travis.yml create mode 100644 vendor/github.com/vmihailenco/tagparser/LICENSE create mode 100644 vendor/github.com/vmihailenco/tagparser/Makefile create mode 100644 
vendor/github.com/vmihailenco/tagparser/README.md create mode 100644 vendor/github.com/vmihailenco/tagparser/go.mod create mode 100644 vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go create mode 100644 vendor/github.com/vmihailenco/tagparser/internal/safe.go create mode 100644 vendor/github.com/vmihailenco/tagparser/internal/unsafe.go create mode 100644 vendor/github.com/vmihailenco/tagparser/tagparser.go create mode 100644 vendor/github.com/youmark/pkcs8/cipher.go create mode 100644 vendor/github.com/youmark/pkcs8/cipher_3des.go create mode 100644 vendor/github.com/youmark/pkcs8/cipher_aes.go create mode 100644 vendor/github.com/youmark/pkcs8/go.mod create mode 100644 vendor/github.com/youmark/pkcs8/go.sum create mode 100644 vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go create mode 100644 vendor/github.com/youmark/pkcs8/kdf_scrypt.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/internal/semaphore.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.toml rename vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/{ismaster.go => hello.go} (50%) delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/operation.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.toml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy_command_metadata.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_16.go create mode 100644 
vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_17.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/resource_pool.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go rename vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/{tls_connection_source.go => tls_connection_source_1_17.go} (66%) delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_10.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_9.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go rename vendor/golang.org/x/crypto/{ => internal}/poly1305/bits_compat.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/bits_go1.13.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/mac_noasm.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/poly1305.go (98%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_amd64.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_amd64.s (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_generic.go (99%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_ppc64le.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_ppc64le.s (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_s390x.go (99%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_s390x.s (99%) create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go delete mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ascii.go create mode 100644 vendor/golang.org/x/net/http2/go115.go create mode 100644 vendor/golang.org/x/net/http2/go118.go create mode 100644 vendor/golang.org/x/net/http2/not_go115.go create mode 100644 vendor/golang.org/x/net/http2/not_go118.go create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go delete mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go delete mode 100644 vendor/google.golang.org/grpc/credentials/go12.go create mode 100644 
vendor/google.golang.org/grpc/credentials/insecure/insecure.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go rename vendor/google.golang.org/grpc/internal/{credentials/syscallconn_appengine.go => grpcutil/grpcutil.go} (72%) rename vendor/google.golang.org/grpc/internal/{resolver/dns/go113.go => grpcutil/regex.go} (63%) delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go rename vendor/google.golang.org/grpc/{ => internal/transport}/proxy.go (71%) create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go diff --git a/go.mod b/go.mod index c5f0bc0d2..4d2787593 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,18 @@ module github.com/terraform-providers/terraform-provider-pagerduty go 1.16 require ( - cloud.google.com/go v0.71.0 // indirect - github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 - github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e - golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd // indirect - google.golang.org/api v0.35.0 // 
indirect - google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb // indirect - google.golang.org/grpc v1.33.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.4.1 // indirect + github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0 + github.com/hashicorp/terraform-registry-address v0.0.0-20220510144317-d78f4a47ae27 // indirect + github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect + github.com/heimweh/go-pagerduty v0.0.0-20220613181429-a35543968162 + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/vmihailenco/tagparser v0.1.2 // indirect + golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect + golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect ) diff --git a/go.sum b/go.sum index dd09ff741..6ae581476 100644 --- a/go.sum +++ b/go.sum @@ -1,45 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.71.0 h1:2ha722Z08cmRa0orJrzBaszYQcLbLFcsZHsGSj/kIF4= -cloud.google.com/go v0.71.0/go.mod h1:qZfY4Y7AEIQwG/fQYD3xrxLNkQZ0Xzf3HGeqCkA6LVM= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub 
v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -48,13 +9,13 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/ github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= +github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= @@ -63,22 +24,16 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod 
h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ= -github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -88,73 +43,36 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod 
h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -162,47 +80,31 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 
h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -211,114 +113,65 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-getter v1.5.3 h1:NF5+zOlQegim+w/EUhSLh6QhXHmZMEeHLQzllkQ3ROU= -github.com/hashicorp/go-getter v1.5.3/go.mod h1:BrrV/1clo8cCYu6mxvboYg+KutTiFnXjMEgDD8+i7ZI= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= -github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= -github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= 
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= +github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-plugin v1.4.1 h1:6UltRQlLN9iZO513VveELp5xyaFxVD2+1OVylE+2E+w= -github.com/hashicorp/go-plugin v1.4.1/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hc-install v0.3.1 h1:VIjllE6KyAI1A244G8kTaHXy+TL5/XYzvrtFi8po/Yk= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.5.0 h1:O293SZ2Eg+AAYijkVK3jR786Am1bhDEh2GHT0tIVE5E= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.3.1/go.mod h1:3LCdWcCDS1gaHC9mhHCGbkYfoY6vdsKohGjugbZdZak= -github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= -github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= +github.com/hashicorp/hc-install v0.3.2 h1:oiQdJZvXmkNcRcEOOfM5n+VTsvNjWQeOjfAoO6dKSH8= +github.com/hashicorp/hc-install v0.3.2/go.mod h1:xMG6Tr8Fw1WFjlxH0A9v61cW15pFwgEGqEz0V4jisHs= +github.com/hashicorp/hcl/v2 v2.12.0 h1:PsYxySWpMD4KPaoJLnsHwtK5Qptvj/4Q6s0t4sUxZf4= +github.com/hashicorp/hcl/v2 v2.12.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= 
-github.com/hashicorp/terraform-exec v0.14.0 h1:UQoUcxKTZZXhyyK68Cwn4mApT4mnFPmEXPiqaHL9r+w= -github.com/hashicorp/terraform-exec v0.14.0/go.mod h1:qrAASDq28KZiMPDnQ02sFS9udcqEkRly002EA2izXTA= -github.com/hashicorp/terraform-exec v0.15.0 h1:cqjh4d8HYNQrDoEmlSGelHmg2DYDh5yayckvJ5bV18E= -github.com/hashicorp/terraform-exec v0.15.0/go.mod h1:H4IG8ZxanU+NW0ZpDRNsvh9f0ul7C0nHP+rUR/CHs7I= -github.com/hashicorp/terraform-json v0.12.0 h1:8czPgEEWWPROStjkWPUnTQDXmpmZPlkQAwYYLETaTvw= -github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= -github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY= +github.com/hashicorp/terraform-exec v0.16.1 h1:NAwZFJW2L2SaCBVZoVaH8LPImLOGbPLkSHy0IYbs2uE= +github.com/hashicorp/terraform-exec v0.16.1/go.mod h1:aj0lVshy8l+MHhFNoijNHtqTJQI3Xlowv5EOsEaGO7M= github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk= -github.com/hashicorp/terraform-plugin-go v0.3.0 h1:AJqYzP52JFYl9NABRI7smXI1pNjgR5Q/y2WyVJ/BOZA= -github.com/hashicorp/terraform-plugin-go v0.3.0/go.mod h1:dFHsQMaTLpON2gWhVWT96fvtlc/MF1vSy3OdMhWBzdM= -github.com/hashicorp/terraform-plugin-go v0.5.0 h1:+gCDdF0hcYCm0YBTxrP4+K1NGIS5ZKZBKDORBewLJmg= -github.com/hashicorp/terraform-plugin-go v0.5.0/go.mod h1:PAVN26PNGpkkmsvva1qfriae5Arky3xl3NfzKa8XFVM= -github.com/hashicorp/terraform-plugin-log v0.2.0 h1:rjflRuBqCnSk3UHOR25MP1G5BDLKktTA6lNjjcAnBfI= -github.com/hashicorp/terraform-plugin-log v0.2.0/go.mod h1:E1kJmapEHzqu1x6M++gjvhzM2yMQNXPVWZRCB8sgYjg= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.7.1 h1:vpzKKP2dIFb9n89AG8Wxl758/5JSZWZH0OuKdlq0M38= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.7.1/go.mod h1:o3pdss6ynDZW9FfiZ+rETUH5LEVufrXdhwLU+5OiRo0= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 h1:B9AocC+dxrCqcf4vVhztIkSkt3gpRjUkEka8AmZWGlQ= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1/go.mod h1:FjM9DXWfP0w/AeOtJoSKHBZ01LqmaO6uP4bXhv3fekw= -github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 h1:1FGtlkJw87UsTMg5s8jrekrHmUPUJaMcu6ELiVhQrNw= +github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-go v0.9.1 h1:vXdHaQ6aqL+OF076nMSBV+JKPdmXlzG5mzVDD04WyPs= +github.com/hashicorp/terraform-plugin-go v0.9.1/go.mod h1:ItjVSlQs70otlzcCwlPcU8FRXLdO973oYFRZwAOxy8M= +github.com/hashicorp/terraform-plugin-log v0.4.0/go.mod h1:9KclxdunFownr4pIm1jdmwKRmE4d6HVG2c9XDq47rpg= +github.com/hashicorp/terraform-plugin-log v0.4.1 h1:xpbmVhvuU3mgHzLetOmx9pkOL2rmgpu302XxddON6eo= +github.com/hashicorp/terraform-plugin-log v0.4.1/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0 h1:Qr5fWNg1SPSfCRMtou67Y6Kcy9UnMYRNlIJTKRuUvXU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0/go.mod h1:b+LFg8WpYgFgvEBP/6Htk5H9/pJp1V1E8NJAekfH2Ws= github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896/go.mod h1:bzBPnUIkI0RxauU8Dqo+2KrZZ28Cf48s8V6IHt3p4co= +github.com/hashicorp/terraform-registry-address v0.0.0-20220510144317-d78f4a47ae27 h1:IOawOnLgKntezAV3oJs17rkhXha+h0EF5OMjb2KFlYc= +github.com/hashicorp/terraform-registry-address v0.0.0-20220510144317-d78f4a47ae27/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 
h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/heimweh/go-pagerduty v0.0.0-20210831220234-54710c5e87d1 h1:yBAqdyzvIpGktzh8EnWqaXAtwhdgWqrP2Zk53MDH89Y= -github.com/heimweh/go-pagerduty v0.0.0-20210831220234-54710c5e87d1/go.mod h1:6+bccpjQ/PM8uQY9m8avM4MJea+3vo3ta9r8kGQ4XFY= -github.com/heimweh/go-pagerduty v0.0.0-20210930203304-530eff2acdc6 h1:/D0VtHEOCdotE1vSB9XznceAjIGkUieZ4BF6VKUIqNU= -github.com/heimweh/go-pagerduty v0.0.0-20210930203304-530eff2acdc6/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20211027200436-ac3c86fd0ce5 h1:lAh9VAHBA09RKMNdbwdeWuzWSDbQ3wnUaa7mzrCKsCI= -github.com/heimweh/go-pagerduty v0.0.0-20211027200436-ac3c86fd0ce5/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20211110145933-0ab24da93a91 h1:vqBxGv/CFAIyr1HRvG2DjOuMVHw62sP38s7p8uVfi9k= -github.com/heimweh/go-pagerduty v0.0.0-20211110145933-0ab24da93a91/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20211119212911-31ef1eea0d0f h1:/sgh3L7adJkcpj87qfMjAsr4mDxlIJrbGIGbS1JxxtQ= -github.com/heimweh/go-pagerduty v0.0.0-20211119212911-31ef1eea0d0f/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20211210233744-b65de43109c1 h1:49zl3n/g+ff/7/CpxiuqnenyxId5iAjbhgoE9iDHULc= -github.com/heimweh/go-pagerduty v0.0.0-20211210233744-b65de43109c1/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20220208023456-83fe435832fb h1:p3faOVCU8L4wab9mpxN9Cm/VNVkPD8GMEfD0sPHw9nY= -github.com/heimweh/go-pagerduty v0.0.0-20220208023456-83fe435832fb/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20220422231448-43095fe5ba3f h1:NLk7iDq85F2lz0q1gY32vZR506aYiNcgvV+Us1rX1q4= -github.com/heimweh/go-pagerduty v0.0.0-20220422231448-43095fe5ba3f/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20220428180718-5a69bb821163 h1:ETKxW+KSjOPaRzZU9f+QrjCkrL7hQqtMPKDv8DnLDO4= -github.com/heimweh/go-pagerduty v0.0.0-20220428180718-5a69bb821163/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e h1:xit0rQWTVlM9ohz4IzrddDKglC7jey+m3GSI/bz3TIw= -github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod 
h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/heimweh/go-pagerduty v0.0.0-20220613181429-a35543968162 h1:DcleOkW9WwDfzJ/UKsdJZxEG/2feY7Qrkc0j3wSqUjc= +github.com/heimweh/go-pagerduty v0.0.0-20220613181429-a35543968162/go.mod h1:t9vftsO1IjYHGdgJXeemZtomCWnxi2SRgu0PRcRb2oY= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ= -github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= +github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -329,166 +182,112 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod 
h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod 
h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.8.4 h1:pwhhz5P+Fjxse7S7UriBrMu6AUJSZM5pKqGem1PjGAs= -github.com/zclconf/go-cty v1.8.4/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.9.1 h1:viqrgQwFl5UpSxc046qblj78wZXVDFnSOufaOTER+cc= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.mongodb.org/mongo-driver v1.7.0 h1:hHrvOBWlWB2c7+8Gh/Xi5jj82AgidK/t7KVXBZ+IyUA= -go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.mongodb.org/mongo-driver v1.7.2 h1:pFttQyIiJUHEn50YfZgC9ECjITMT44oiN36uArf/OFg= -go.mongodb.org/mongo-driver v1.7.2/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c= +go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint 
v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -498,239 +297,110 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79 h1:RX8C8PRZc2hTIod4ds8ij+/4RQX3AqhYj3uOHmyaz4E= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
-golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd h1:kJP9fbfkpUoA4y03Nxor8be+YbShcXP16fc7G4nlgpw= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb h1:MoNcrN5yaH+35Ge8RUwFbL7ekwq9ED2fiDpgWKrR29w= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -740,32 +410,26 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml index a51a14466..68d38816f 100644 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -3,8 +3,11 @@ sudo: false matrix: fast_finish: true include: - - go: 1.11.x + - go: 1.14.x env: TEST_METHOD=goveralls + - go: 1.13.x + - go: 1.12.x + - go: 1.11.x - go: 1.10.x - go: tip - go: 1.9.x @@ -14,6 +17,8 @@ matrix: - go: 1.5.x allow_failures: - go: tip + - go: 1.11.x + - go: 1.10.x - go: 1.9.x - go: 1.8.x - go: 1.7.x diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md index 9e4255879..d9a8ce16d 100644 --- a/vendor/github.com/agext/levenshtein/README.md +++ b/vendor/github.com/agext/levenshtein/README.md @@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th ## Project Status -v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. +v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. 
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go index df69ce701..56d719b83 100644 --- a/vendor/github.com/agext/levenshtein/levenshtein.go +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -108,7 +108,7 @@ func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, for x := 0; x < l2; x++ { dy, d[doff] = d[doff], d[doff]+insCost - for d[doff] > maxCost && dlen > 0 { + for doff < l1 && d[doff] > maxCost && dlen > 0 { if str1[doff] != str2[x] { dy += subCost } diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index ed749d4c5..20823af04 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -28,6 +28,16 @@ import ( // For example, 10.3.0.0/16, extended by 8 bits, with a network number // of 5, becomes 10.3.5.0/24 . func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { + return SubnetBig(base, newBits, big.NewInt(int64(num))) +} + +// SubnetBig takes a parent CIDR range and creates a subnet within it with the +// given number of additional prefix bits and the given network number. It +// differs from Subnet in that it takes a *big.Int for the num, instead of an int. +// +// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5, +// becomes 10.3.5.0/24 . +func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) { ip := base.IP mask := base.Mask @@ -39,24 +49,32 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { } maxNetNum := uint64(1<<uint64(newBits)) - 1 - if uint64(num) > maxNetNum { + if num.Uint64() > maxNetNum { return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) } return &net.IPNet{ - IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen), + IP: insertNumIntoIP(ip, num, newPrefixLen), Mask: net.CIDRMask(newPrefixLen, addrLen), }, nil } -// Host takes a parent CIDR range and turns it into a host IP address with -// the given host number. +// Host takes a parent CIDR range and turns it into a host IP address with the +// given host number. // // For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) { ip := base.IP mask := base.Mask - bigNum := big.NewInt(int64(num)) parentLen, addrLen := mask.Size() hostLen := addrLen - parentLen @@ -65,11 +83,11 @@ func Host(base *net.IPNet, num int) (net.IP, error) { maxHostNum.Lsh(maxHostNum, uint(hostLen)) maxHostNum.Sub(maxHostNum, big.NewInt(1)) - numUint64 := big.NewInt(int64(bigNum.Uint64())) - if bigNum.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(bigNum) + numUint64 := big.NewInt(int64(num.Uint64())) + if num.Cmp(big.NewInt(0)) == -1 { + numUint64.Neg(num) numUint64.Sub(numUint64, big.NewInt(int64(1))) - bigNum.Sub(maxHostNum, numUint64) + num.Sub(maxHostNum, numUint64) } if numUint64.Cmp(maxHostNum) == 1 { @@ -81,7 +99,7 @@ func Host(base *net.IPNet, num int) (net.IP, error) { } else { bitlength = 128 } - return insertNumIntoIP(ip, bigNum, bitlength), nil + return insertNumIntoIP(ip, num, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE deleted file mode 100644 index 684b03b4a..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/LICENSE +++ /dev/null @@ -1,95 +0,0 @@ -Copyright (c) 2017 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------- - -Unicode table generation programs are under a separate copyright and license: - -Copyright (c) 2014 Couchbase, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the -License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -either express or implied. See the License for the specific language governing permissions -and limitations under the License. - ---------- - -Grapheme break data is provided as part of the Unicode character database, -copright 2016 Unicode, Inc, which is provided with the following license: - -Unicode Data Files include all data files under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -Unicode Data Files do not include PDF online code charts under the -directory http://www.unicode.org/Public/. 
- -Software includes any source code published in the Unicode Standard -or under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -NOTICE TO USER: Carefully read the following legal agreement. -BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE -TERMS AND CONDITIONS OF THIS AGREEMENT. -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE -THE DATA FILES OR SOFTWARE. - -COPYRIGHT AND PERMISSION NOTICE - -Copyright © 1991-2017 Unicode, Inc. All rights reserved. -Distributed under the Terms of Use in http://www.unicode.org/copyright.html. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Unicode data files and any associated documentation -(the "Data Files") or Unicode software and any associated documentation -(the "Software") to deal in the Data Files or Software -without restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, and/or sell copies of -the Data Files or Software, and to permit persons to whom the Data Files -or Software are furnished to do so, provided that either -(a) this copyright and permission notice appear with all copies -of the Data Files or Software, or -(b) this copyright and permission notice appear in associated -Documentation. - -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT OF THIRD PARTY RIGHTS. -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THE DATA FILES OR SOFTWARE. - -Except as contained in this notice, the name of a copyright holder -shall not be used in advertising or otherwise to promote the sale, -use or other dealings in these Data Files or Software without prior -written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go deleted file mode 100644 index 5752e9ef8..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go +++ /dev/null @@ -1,30 +0,0 @@ -package textseg - -import ( - "bufio" - "bytes" -) - -// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of -// all of the recognized tokens in the given buffer. -func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret [][]byte - for scanner.Scan() { - ret = append(ret, scanner.Bytes()) - } - return ret, scanner.Err() -} - -// TokenCount is a utility that uses a bufio.SplitFunc to count the number of -// recognized tokens in the given buffer. 
-func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret int - for scanner.Scan() { - ret++ - } - return ret, scanner.Err() -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go deleted file mode 100644 index 81f3a7471..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -package textseg - -//go:generate go run make_tables.go -output tables.go -//go:generate go run make_test_tables.go -output tables_test.go -//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl -//go:generate ragel -Z grapheme_clusters.rl -//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go deleted file mode 100644 index 012bc690a..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go +++ /dev/null @@ -1,5276 +0,0 @@ - -// line 1 "grapheme_clusters.rl" -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT - -// line 13 "grapheme_clusters.go" -var _graphclust_actions []byte = []byte{ - 0, 1, 0, 1, 4, 1, 9, 1, 10, - 1, 11, 1, 12, 1, 13, 1, 14, - 1, 15, 1, 16, 1, 17, 1, 18, - 1, 19, 1, 20, 1, 21, 2, 1, - 7, 2, 1, 8, 2, 2, 3, 2, - 5, 1, 3, 0, 1, 8, 3, 5, - 0, 1, 3, 5, 1, 6, -} - -var _graphclust_key_offsets []int16 = []int16{ - 0, 0, 1, 3, 5, 7, 10, 15, - 17, 20, 28, 31, 33, 35, 37, 67, - 75, 77, 81, 84, 89, 94, 104, 116, - 122, 127, 137, 140, 147, 151, 159, 169, - 173, 181, 183, 191, 194, 196, 201, 203, - 210, 212, 220, 221, 242, 246, 252, 257, - 259, 263, 267, 269, 273, 275, 278, 282, - 284, 291, 293, 297, 301, 305, 307, 309, - 318, 322, 327, 329, 335, 337, 338, 340, - 341, 343, 345, 347, 349, 364, 368, 370, - 372, 377, 381, 385, 387, 389, 393, 397, - 399, 403, 410, 415, 419, 422, 423, 427, - 434, 439, 440, 441, 443, 452, 454, 477, - 481, 483, 487, 491, 492, 496, 500, 503, - 505, 510, 523, 525, 527, 529, 531, 535, - 539, 541, 543, 545, 549, 553, 557, 559, - 561, 563, 565, 566, 568, 574, 580, 586, - 588, 592, 596, 601, 604, 614, 616, 618, - 621, 623, 625, 627, 629, 632, 637, 639, - 642, 650, 653, 655, 657, 659, 690, 698, - 700, 704, 711, 723, 730, 744, 750, 768, - 779, 785, 797, 800, 809, 814, 824, 830, - 844, 850, 862, 874, 878, 880, 886, 888, - 895, 898, 906, 907, 928, 937, 945, 951, - 953, 957, 961, 966, 972, 974, 977, 990, - 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, - 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, - 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, - 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, - 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, - 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, - 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, - 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, - 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, - 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, - 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, - 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, - 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, - 1506, 1508, 1511, 1516, 1518, 
1521, 1529, 1532, - 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, - 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, - 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, - 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, - 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, - 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, - 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, - 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, - 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, - 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, - 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, - 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, - 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, - 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, - 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, - 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, - 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, - 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, - 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, - 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, - 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, - 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, - 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, - 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, - 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, - 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, - 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, - 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, - 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, - 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, - 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, - 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, - 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, - 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, - 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, - 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, - 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, - 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, - 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, - 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, - 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, - 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, - 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, - 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, - 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, - 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, - 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, - 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, - 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, - 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, - 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, - 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, - 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, - 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, - 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, - 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, - 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, - 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, - 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, - 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, - 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, - 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, - 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, - 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, - 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, - 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, - 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, - 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, - 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, - 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, - 4617, 4631, 4637, 4649, 4661, 
4665, 4667, 4673, - 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, - 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, - 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, - 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, - 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, - 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, - 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, - 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, - 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, - 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, - 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, - 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, - 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, - 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, - 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, - 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, - 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, - 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, - 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, - 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, - 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, - 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, - 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, - 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, - 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, - 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, - 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, - 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, - 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, - 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, - 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, - 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, - 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, - 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, - 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, - 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, - 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, - 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, - 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, - 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, - 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, - 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, - 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, - 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, - 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, - 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, - 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, - 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, - 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, - 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, - 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, - 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, - 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, - 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, - 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, - 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, - 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, - 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, - 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, - 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, - 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, - 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, - 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, - 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, - 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, - 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, - 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, - 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, - 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, - 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, - 7645, 7650, 7651, 7652, 7654, 
7663, 7665, 7688, - 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, - 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, - 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, - 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, - 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, - 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, - 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, - 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, - 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, - 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, - 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, - 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, - 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, - 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, - 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, - 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, - 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, - 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, - 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, - 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, - 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, - 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, - 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, - 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, - 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, - 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, - 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, - 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, - 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, - 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, - 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, - 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, - 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, - 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, - 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, - 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, - 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, - 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, - 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, - 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, - 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, - 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, - 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, - 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, - 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, - 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, - 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, - 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, - 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, - 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, - 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, - 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, - 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, - 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, - 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, - 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, - 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, - 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, - 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, - 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, - 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, - 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, - 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, - 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, - 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, - 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, - 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, - 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, - 10890, 10911, 
10932, 10952, 10972, 10992, 11012, 11032, - 11052, 11072, 11092, 11112, -} - -var _graphclust_trans_keys []byte = []byte{ - 10, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 167, 169, 171, 173, 174, 175, - 176, 177, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 191, 166, - 170, 172, 178, 150, 153, 155, 163, 165, - 167, 169, 173, 153, 155, 148, 161, 163, - 255, 189, 132, 185, 144, 152, 161, 164, - 255, 188, 129, 131, 190, 255, 133, 134, - 137, 138, 142, 150, 152, 161, 164, 255, - 131, 134, 137, 138, 142, 144, 146, 175, - 178, 180, 182, 255, 134, 138, 142, 161, - 164, 255, 188, 129, 131, 190, 191, 128, - 132, 135, 136, 139, 141, 150, 151, 162, - 163, 130, 190, 191, 151, 128, 130, 134, - 136, 138, 141, 128, 131, 190, 255, 133, - 137, 142, 148, 151, 161, 164, 255, 128, - 132, 134, 136, 138, 141, 149, 150, 162, - 163, 129, 131, 190, 255, 133, 137, 142, - 150, 152, 161, 164, 255, 130, 131, 138, - 150, 143, 148, 152, 159, 178, 179, 177, - 179, 186, 135, 142, 177, 179, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 177, 191, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 173, 183, 185, 190, 150, 153, - 158, 160, 177, 180, 130, 141, 157, 132, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 180, 255, 148, 156, 158, - 255, 139, 141, 169, 133, 134, 160, 171, - 176, 187, 151, 155, 160, 162, 191, 149, - 158, 165, 188, 176, 190, 128, 132, 180, - 255, 133, 170, 180, 255, 128, 130, 161, - 173, 166, 179, 164, 183, 173, 144, 146, - 148, 168, 178, 180, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 128, 131, 157, 179, 181, 183, 144, - 176, 164, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 163, - 167, 128, 129, 180, 255, 134, 159, 178, - 255, 166, 173, 135, 147, 128, 131, 179, - 255, 129, 164, 166, 255, 169, 182, 131, - 188, 140, 141, 176, 178, 180, 183, 184, - 190, 191, 129, 171, 175, 181, 182, 163, - 170, 172, 173, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 128, 130, 184, - 255, 135, 190, 131, 175, 187, 255, 128, - 130, 167, 180, 179, 128, 130, 179, 255, - 129, 137, 141, 255, 190, 172, 183, 159, - 170, 188, 128, 131, 190, 191, 151, 128, - 132, 135, 136, 139, 141, 162, 163, 166, - 172, 176, 180, 181, 191, 128, 134, 176, - 255, 132, 255, 175, 181, 184, 255, 129, - 155, 158, 255, 129, 255, 171, 183, 157, - 171, 175, 182, 184, 191, 146, 167, 169, - 182, 171, 172, 189, 190, 176, 180, 176, - 182, 145, 190, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 165, 169, - 173, 178, 187, 255, 131, 132, 140, 169, - 174, 255, 130, 132, 128, 182, 187, 255, - 173, 180, 182, 255, 132, 155, 159, 161, - 175, 128, 163, 165, 128, 134, 136, 152, - 155, 161, 163, 164, 166, 170, 144, 150, - 132, 138, 145, 146, 151, 166, 169, 0, - 127, 176, 255, 131, 137, 191, 145, 189, - 135, 129, 130, 132, 133, 144, 154, 176, - 139, 159, 150, 156, 159, 164, 167, 168, - 170, 173, 145, 176, 255, 139, 
255, 166, - 176, 171, 179, 160, 161, 163, 164, 165, - 166, 167, 169, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 168, 170, 150, 153, 155, 163, 165, 167, - 169, 173, 153, 155, 148, 161, 163, 255, - 131, 187, 189, 132, 185, 190, 255, 141, - 144, 129, 136, 145, 151, 152, 161, 162, - 163, 164, 255, 129, 188, 190, 130, 131, - 191, 255, 141, 151, 129, 132, 133, 134, - 137, 138, 142, 161, 162, 163, 164, 255, - 131, 188, 129, 130, 190, 255, 145, 181, - 129, 130, 131, 134, 135, 136, 137, 138, - 139, 141, 142, 175, 176, 177, 178, 255, - 134, 138, 141, 129, 136, 142, 161, 162, - 163, 164, 255, 129, 188, 130, 131, 190, - 191, 128, 141, 129, 132, 135, 136, 139, - 140, 150, 151, 162, 163, 130, 190, 191, - 128, 141, 151, 129, 130, 134, 136, 138, - 140, 128, 129, 131, 190, 255, 133, 137, - 129, 132, 142, 148, 151, 161, 164, 255, - 129, 188, 190, 191, 130, 131, 130, 134, - 128, 132, 135, 136, 138, 139, 140, 141, - 149, 150, 162, 163, 129, 190, 130, 131, - 191, 255, 133, 137, 141, 151, 129, 132, - 142, 161, 162, 163, 164, 255, 138, 143, - 150, 159, 144, 145, 146, 148, 152, 158, - 178, 179, 177, 179, 180, 186, 135, 142, - 177, 179, 180, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 191, - 177, 190, 128, 132, 134, 135, 141, 151, - 153, 188, 134, 128, 129, 130, 141, 156, - 157, 158, 159, 160, 162, 164, 168, 169, - 170, 172, 173, 174, 175, 176, 179, 183, - 177, 173, 183, 185, 186, 187, 188, 189, - 190, 150, 151, 152, 153, 158, 160, 177, - 180, 130, 132, 141, 157, 133, 134, 157, - 159, 146, 148, 178, 180, 146, 147, 178, - 179, 182, 180, 189, 190, 255, 134, 157, - 137, 147, 148, 255, 139, 141, 169, 133, - 134, 178, 160, 162, 163, 166, 167, 168, - 169, 171, 176, 184, 185, 187, 155, 151, - 152, 153, 154, 150, 160, 162, 191, 149, - 151, 152, 158, 165, 172, 173, 178, 179, - 188, 176, 190, 132, 181, 187, 128, 131, - 180, 188, 189, 255, 130, 133, 170, 171, - 179, 180, 255, 130, 161, 170, 128, 129, - 162, 165, 166, 167, 168, 173, 167, 173, - 166, 169, 170, 174, 175, 177, 178, 179, - 164, 171, 172, 179, 180, 181, 182, 183, - 161, 173, 180, 144, 146, 148, 168, 178, - 179, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 144, 176, - 175, 177, 191, 160, 191, 128, 130, 170, - 175, 153, 154, 153, 154, 155, 160, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 175, 175, 178, 180, 189, 158, 159, - 176, 177, 130, 134, 139, 167, 163, 164, - 165, 166, 132, 133, 134, 159, 160, 177, - 178, 255, 166, 173, 135, 145, 146, 147, - 131, 179, 188, 128, 130, 180, 181, 182, - 185, 186, 255, 165, 129, 255, 169, 174, - 175, 176, 177, 178, 179, 180, 181, 182, - 131, 140, 141, 188, 176, 178, 180, 183, - 184, 190, 191, 129, 171, 181, 182, 172, - 173, 174, 175, 165, 168, 172, 173, 163, - 170, 172, 184, 190, 158, 128, 143, 160, - 175, 144, 145, 150, 155, 157, 158, 159, - 135, 139, 141, 168, 171, 189, 160, 182, - 186, 191, 129, 131, 133, 134, 140, 143, - 184, 186, 165, 166, 128, 129, 130, 132, - 133, 134, 135, 136, 139, 140, 141, 144, - 145, 146, 147, 150, 151, 152, 153, 154, - 156, 176, 178, 129, 128, 130, 184, 255, - 135, 190, 130, 131, 175, 176, 178, 183, - 184, 187, 255, 172, 128, 130, 167, 180, - 179, 130, 128, 129, 179, 181, 182, 190, - 191, 255, 129, 137, 138, 140, 141, 255, - 180, 190, 172, 174, 175, 177, 178, 181, - 182, 183, 159, 160, 162, 163, 170, 188, - 190, 191, 128, 129, 130, 131, 128, 151, - 129, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 183, 184, 191, - 133, 128, 129, 130, 134, 176, 185, 189, - 177, 178, 179, 
186, 187, 190, 191, 255, - 129, 132, 255, 175, 190, 176, 177, 178, - 181, 184, 187, 188, 255, 129, 155, 158, - 255, 189, 176, 178, 179, 186, 187, 190, - 191, 255, 129, 255, 172, 182, 171, 173, - 174, 175, 176, 183, 166, 157, 159, 160, - 161, 162, 171, 175, 190, 176, 182, 184, - 191, 169, 177, 180, 146, 167, 170, 182, - 171, 172, 189, 190, 176, 180, 176, 182, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 166, 173, 165, 169, 174, - 178, 187, 255, 131, 132, 140, 169, 174, - 255, 130, 132, 128, 182, 187, 255, 173, - 180, 182, 255, 132, 155, 159, 161, 175, - 128, 163, 165, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 144, 150, 132, - 138, 143, 187, 191, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 139, - 168, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 144, 145, 150, 155, - 157, 158, 128, 191, 173, 128, 159, 160, - 191, 156, 128, 133, 134, 191, 0, 127, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 
131, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 
170, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 128, - 191, 160, 172, 174, 191, 128, 133, 134, - 155, 157, 191, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 132, 133, 134, 135, 160, - 128, 191, 128, 255, 128, 129, 130, 132, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 160, 255, 128, 129, - 130, 133, 134, 135, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 160, 255, - 168, 255, 128, 129, 130, 134, 135, 141, - 156, 157, 158, 159, 160, 162, 164, 168, - 169, 170, 172, 173, 174, 175, 176, 179, - 183, 168, 255, 192, 255, 159, 139, 187, - 158, 159, 176, 255, 135, 138, 139, 187, - 188, 255, 168, 255, 153, 154, 155, 160, - 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 175, 177, 178, 179, 180, 181, - 182, 184, 185, 186, 187, 188, 189, 191, - 176, 190, 192, 255, 135, 147, 160, 188, - 128, 156, 184, 129, 255, 128, 129, 130, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 158, 159, 135, 255, - 148, 176, 140, 168, 132, 160, 188, 152, - 180, 144, 172, 136, 164, 192, 255, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 128, 156, 160, 255, 136, 164, - 175, 176, 255, 128, 141, 143, 191, 128, - 129, 152, 
155, 156, 130, 191, 140, 141, - 128, 138, 144, 167, 175, 191, 128, 159, - 176, 191, 157, 128, 191, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 128, 156, 160, 255, 136, 164, 175, - 176, 255, 135, 138, 139, 187, 188, 191, - 192, 255, 187, 191, 128, 190, 128, 190, - 188, 128, 175, 190, 191, 145, 155, 157, - 159, 128, 191, 130, 135, 128, 191, 189, - 128, 191, 128, 129, 130, 131, 132, 191, - 178, 128, 191, 128, 159, 164, 191, 133, - 128, 191, 128, 178, 187, 191, 135, 142, - 143, 145, 146, 149, 150, 153, 154, 155, - 164, 128, 191, 128, 165, 166, 191, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 129, 135, 132, - 134, 128, 175, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 132, 133, 134, 135, 160, - 128, 191, 0, 127, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 
130, - 141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 166, 167, 169, 171, 172, - 173, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 168, 170, 150, 153, 155, - 163, 165, 167, 169, 173, 153, 155, 148, - 161, 163, 255, 131, 187, 189, 132, 185, - 190, 255, 141, 144, 129, 136, 145, 151, - 152, 161, 162, 163, 164, 255, 129, 188, - 190, 130, 131, 191, 255, 141, 151, 129, - 132, 133, 134, 137, 138, 142, 161, 162, - 163, 164, 255, 131, 188, 129, 130, 190, - 255, 145, 181, 129, 130, 131, 134, 135, - 136, 137, 138, 139, 141, 142, 175, 176, - 177, 178, 255, 134, 138, 141, 129, 136, - 142, 161, 162, 163, 164, 255, 129, 188, - 130, 131, 190, 191, 128, 141, 129, 132, - 135, 136, 139, 140, 150, 151, 162, 163, - 130, 190, 191, 128, 141, 151, 129, 130, - 134, 136, 138, 140, 128, 129, 131, 190, - 255, 133, 137, 129, 132, 142, 148, 151, - 161, 164, 255, 129, 188, 190, 191, 130, - 131, 130, 134, 128, 132, 135, 136, 138, - 139, 140, 141, 149, 150, 162, 163, 129, - 190, 130, 131, 191, 255, 133, 137, 141, - 151, 129, 132, 142, 161, 162, 163, 164, - 255, 138, 143, 150, 159, 144, 145, 146, - 148, 152, 158, 178, 179, 177, 179, 180, - 186, 135, 142, 177, 179, 180, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 191, 177, 190, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 177, 
173, 183, 185, 186, - 187, 188, 189, 190, 150, 151, 152, 153, - 158, 160, 177, 180, 130, 132, 141, 157, - 133, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 182, 180, 189, 190, - 255, 134, 157, 137, 147, 148, 255, 139, - 141, 169, 133, 134, 178, 160, 162, 163, - 166, 167, 168, 169, 171, 176, 184, 185, - 187, 155, 151, 152, 153, 154, 150, 160, - 162, 191, 149, 151, 152, 158, 165, 172, - 173, 178, 179, 188, 176, 190, 132, 181, - 187, 128, 131, 180, 188, 189, 255, 130, - 133, 170, 171, 179, 180, 255, 130, 161, - 170, 128, 129, 162, 165, 166, 167, 168, - 173, 167, 173, 166, 169, 170, 174, 175, - 177, 178, 179, 164, 171, 172, 179, 180, - 181, 182, 183, 161, 173, 180, 144, 146, - 148, 168, 178, 179, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 144, 176, 175, 177, 191, 160, 191, - 128, 130, 170, 175, 153, 154, 153, 154, - 155, 160, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 175, 175, 178, 180, - 189, 158, 159, 176, 177, 130, 134, 139, - 167, 163, 164, 165, 166, 132, 133, 134, - 159, 160, 177, 178, 255, 166, 173, 135, - 145, 146, 147, 131, 179, 188, 128, 130, - 180, 181, 182, 185, 186, 255, 165, 129, - 255, 169, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 131, 140, 141, 188, 176, - 178, 180, 183, 184, 190, 191, 129, 171, - 181, 182, 172, 173, 174, 175, 165, 168, - 172, 173, 163, 170, 172, 184, 190, 158, - 128, 143, 160, 175, 144, 145, 150, 155, - 157, 158, 159, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 129, 128, - 130, 184, 255, 135, 190, 130, 131, 175, - 176, 178, 183, 184, 187, 255, 172, 128, - 130, 167, 180, 179, 130, 128, 129, 179, - 181, 182, 190, 191, 255, 129, 137, 138, - 140, 141, 255, 180, 190, 172, 174, 175, - 177, 178, 181, 182, 183, 159, 160, 162, - 163, 170, 188, 190, 191, 128, 129, 130, - 131, 128, 151, 129, 132, 135, 136, 139, - 141, 162, 163, 166, 172, 176, 180, 181, - 183, 184, 191, 133, 128, 129, 130, 134, - 176, 185, 189, 177, 178, 179, 186, 187, - 190, 191, 255, 129, 132, 255, 175, 190, - 176, 177, 178, 181, 184, 187, 188, 255, - 129, 155, 158, 255, 189, 176, 178, 179, - 186, 187, 190, 191, 255, 129, 255, 172, - 182, 171, 173, 174, 175, 176, 183, 166, - 157, 159, 160, 161, 162, 171, 175, 190, - 176, 182, 184, 191, 169, 177, 180, 146, - 167, 170, 182, 171, 172, 189, 190, 176, - 180, 176, 182, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 166, 173, - 165, 169, 174, 178, 187, 255, 131, 132, - 140, 169, 174, 255, 130, 132, 128, 182, - 187, 255, 173, 180, 182, 255, 132, 155, - 159, 161, 175, 128, 163, 165, 128, 134, - 136, 152, 155, 161, 163, 164, 166, 170, - 144, 150, 132, 138, 143, 187, 191, 160, - 128, 129, 132, 135, 133, 134, 160, 255, - 192, 255, 139, 168, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 128, 129, 130, - 132, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 160, 255, 128, - 129, 130, 133, 134, 135, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 160, - 255, 168, 255, 128, 129, 130, 134, 135, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 168, 255, 192, 255, 159, 139, - 187, 158, 159, 176, 255, 135, 138, 139, - 187, 188, 255, 168, 255, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 177, 178, 179, 180, - 181, 
182, 184, 185, 186, 187, 188, 189, - 191, 176, 190, 192, 255, 135, 147, 160, - 188, 128, 156, 184, 129, 255, 128, 129, - 130, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 158, 159, 135, - 255, 148, 176, 140, 168, 132, 160, 188, - 152, 180, 144, 172, 136, 164, 192, 255, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 128, 156, 160, 255, 136, - 164, 175, 176, 255, 142, 128, 191, 128, - 129, 152, 155, 156, 130, 191, 139, 141, - 128, 140, 142, 143, 144, 167, 168, 174, - 175, 191, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 167, 169, 171, 173, 174, - 175, 176, 177, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 166, 170, 172, 178, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 189, 132, 185, 144, 152, 161, - 164, 255, 188, 129, 131, 190, 255, 133, - 134, 137, 138, 142, 150, 152, 161, 164, - 255, 131, 134, 137, 138, 142, 144, 146, - 175, 178, 180, 182, 255, 134, 138, 142, - 161, 164, 255, 188, 129, 131, 190, 191, - 128, 132, 135, 136, 139, 141, 150, 151, - 162, 163, 130, 190, 191, 151, 128, 130, - 134, 136, 138, 141, 128, 131, 190, 255, - 133, 137, 142, 148, 151, 161, 164, 255, - 128, 132, 134, 136, 138, 141, 149, 150, - 162, 163, 129, 131, 190, 255, 133, 137, - 142, 150, 152, 161, 164, 255, 130, 131, - 138, 150, 143, 148, 152, 159, 178, 179, - 177, 179, 186, 135, 142, 177, 179, 185, - 187, 188, 136, 141, 181, 183, 185, 152, - 153, 190, 191, 177, 191, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 173, 183, 185, 190, 150, - 153, 158, 160, 177, 180, 130, 141, 157, - 132, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 180, 255, 148, 156, - 158, 255, 139, 141, 169, 133, 134, 160, - 171, 176, 187, 151, 155, 160, 162, 191, - 149, 158, 165, 188, 176, 190, 128, 132, - 180, 255, 133, 170, 180, 255, 128, 130, - 161, 173, 166, 179, 164, 183, 173, 144, - 146, 148, 168, 178, 180, 184, 185, 128, - 181, 187, 191, 128, 131, 179, 181, 183, - 140, 141, 144, 176, 175, 177, 191, 160, - 191, 128, 130, 170, 175, 153, 154, 153, - 154, 155, 160, 162, 163, 164, 165, 166, - 167, 168, 169, 170, 171, 175, 175, 178, - 180, 189, 158, 159, 176, 177, 130, 134, - 139, 163, 167, 128, 129, 180, 255, 134, - 159, 178, 255, 166, 173, 135, 147, 128, - 131, 179, 255, 129, 164, 166, 255, 169, - 182, 131, 188, 140, 141, 176, 178, 180, - 183, 184, 190, 191, 129, 171, 175, 181, - 182, 163, 170, 172, 173, 172, 184, 190, - 158, 128, 143, 160, 175, 144, 145, 150, - 155, 157, 158, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 
166, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 128, 130, - 184, 255, 135, 190, 131, 175, 187, 255, - 128, 130, 167, 180, 179, 128, 130, 179, - 255, 129, 137, 141, 255, 190, 172, 183, - 159, 170, 188, 128, 131, 190, 191, 151, - 128, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 191, 128, 134, - 176, 255, 132, 255, 175, 181, 184, 255, - 129, 155, 158, 255, 129, 255, 171, 183, - 157, 171, 175, 182, 184, 191, 146, 167, - 169, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 145, 190, 143, 146, 178, 157, - 158, 133, 134, 137, 168, 169, 170, 165, - 169, 173, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 160, 128, 129, 132, 135, - 133, 134, 160, 255, 192, 255, 128, 131, - 157, 179, 181, 183, 164, 144, 145, 150, - 155, 157, 158, 159, 145, 146, 151, 166, - 169, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 166, 167, 169, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 168, 170, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 131, 187, 189, 132, 185, 190, - 255, 141, 144, 129, 136, 145, 151, 152, - 161, 162, 163, 164, 255, 129, 188, 190, - 130, 131, 191, 255, 141, 151, 129, 132, - 133, 134, 137, 138, 142, 161, 162, 163, - 164, 255, 131, 188, 129, 130, 190, 255, - 145, 181, 129, 130, 131, 134, 135, 136, - 137, 138, 139, 141, 142, 175, 176, 177, - 178, 255, 134, 138, 141, 129, 136, 142, - 161, 162, 163, 164, 255, 129, 188, 130, - 131, 190, 191, 128, 141, 129, 132, 135, - 136, 139, 140, 150, 151, 162, 163, 130, - 190, 191, 128, 141, 151, 129, 130, 134, - 136, 138, 140, 128, 129, 131, 190, 255, - 133, 137, 129, 132, 142, 148, 151, 161, - 164, 255, 129, 188, 190, 191, 130, 131, - 130, 134, 128, 132, 135, 136, 138, 139, - 140, 141, 149, 150, 162, 163, 129, 190, - 130, 131, 191, 255, 133, 137, 141, 151, - 129, 132, 142, 161, 162, 163, 164, 255, - 138, 143, 150, 159, 144, 145, 146, 148, - 152, 158, 178, 179, 177, 179, 180, 186, - 135, 142, 177, 179, 180, 185, 187, 188, - 136, 141, 181, 183, 185, 152, 153, 190, - 191, 191, 177, 190, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 177, 173, 183, 185, 186, 187, - 188, 189, 190, 150, 151, 152, 153, 158, - 160, 177, 180, 130, 132, 141, 157, 133, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 182, 180, 189, 190, 255, - 134, 157, 137, 147, 148, 255, 139, 141, - 169, 133, 134, 178, 160, 162, 163, 166, - 167, 168, 169, 171, 176, 184, 185, 187, - 155, 151, 152, 153, 154, 150, 160, 162, - 191, 149, 151, 152, 158, 165, 172, 173, - 178, 179, 188, 176, 190, 132, 181, 187, - 128, 131, 180, 188, 189, 255, 130, 133, - 170, 171, 179, 180, 255, 130, 161, 170, - 128, 129, 162, 165, 166, 167, 168, 173, - 167, 173, 166, 169, 170, 174, 175, 177, - 178, 179, 164, 171, 172, 179, 180, 181, - 182, 183, 161, 173, 180, 144, 146, 148, - 168, 178, 179, 184, 185, 128, 181, 187, - 191, 128, 131, 179, 181, 183, 140, 141, - 144, 176, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 
175, 175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 167, - 163, 164, 165, 166, 132, 133, 134, 159, - 160, 177, 178, 255, 166, 173, 135, 145, - 146, 147, 131, 179, 188, 128, 130, 180, - 181, 182, 185, 186, 255, 165, 129, 255, - 169, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 131, 140, 141, 188, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 181, - 182, 172, 173, 174, 175, 165, 168, 172, - 173, 163, 170, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 129, 128, 130, - 184, 255, 135, 190, 130, 131, 175, 176, - 178, 183, 184, 187, 255, 172, 128, 130, - 167, 180, 179, 130, 128, 129, 179, 181, - 182, 190, 191, 255, 129, 137, 138, 140, - 141, 255, 180, 190, 172, 174, 175, 177, - 178, 181, 182, 183, 159, 160, 162, 163, - 170, 188, 190, 191, 128, 129, 130, 131, - 128, 151, 129, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 183, - 184, 191, 133, 128, 129, 130, 134, 176, - 185, 189, 177, 178, 179, 186, 187, 190, - 191, 255, 129, 132, 255, 175, 190, 176, - 177, 178, 181, 184, 187, 188, 255, 129, - 155, 158, 255, 189, 176, 178, 179, 186, - 187, 190, 191, 255, 129, 255, 172, 182, - 171, 173, 174, 175, 176, 183, 166, 157, - 159, 160, 161, 162, 171, 175, 190, 176, - 182, 184, 191, 169, 177, 180, 146, 167, - 170, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 143, 146, 178, 157, 158, 133, - 134, 137, 168, 169, 170, 166, 173, 165, - 169, 174, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 143, 187, 191, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 139, 168, 128, 159, 160, 175, 176, - 191, 157, 128, 191, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 
132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 130, - 141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 139, 168, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 141, 144, 129, - 136, 145, 151, 152, 161, 162, 163, 164, - 255, 129, 188, 190, 130, 131, 191, 255, - 141, 151, 129, 132, 133, 134, 137, 138, - 142, 161, 162, 163, 164, 255, 131, 188, - 129, 130, 190, 255, 145, 181, 129, 130, - 131, 134, 135, 136, 137, 138, 139, 141, - 142, 175, 176, 177, 178, 255, 134, 138, - 141, 129, 136, 142, 161, 162, 163, 164, - 255, 129, 188, 130, 131, 190, 191, 128, - 141, 129, 132, 135, 136, 139, 140, 150, - 151, 162, 163, 130, 190, 191, 128, 141, - 151, 129, 130, 134, 136, 138, 140, 128, - 129, 131, 190, 255, 133, 137, 129, 132, - 142, 148, 151, 161, 164, 255, 129, 188, - 190, 191, 130, 131, 130, 134, 128, 132, - 135, 136, 138, 139, 140, 141, 149, 150, - 162, 163, 129, 190, 130, 131, 191, 255, - 133, 137, 141, 151, 129, 132, 142, 161, - 162, 163, 164, 255, 138, 143, 150, 159, - 144, 145, 146, 148, 152, 158, 178, 179, - 177, 179, 180, 186, 135, 142, 177, 179, - 180, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 191, 177, 190, - 128, 132, 134, 135, 141, 151, 153, 188, - 134, 128, 129, 130, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 177, 173, - 183, 185, 186, 187, 188, 189, 190, 150, - 151, 152, 153, 158, 160, 177, 180, 130, - 132, 141, 157, 133, 134, 157, 159, 146, - 148, 178, 180, 146, 147, 
178, 179, 182, - 180, 189, 190, 255, 134, 157, 137, 147, - 148, 255, 139, 141, 169, 133, 134, 178, - 160, 162, 163, 166, 167, 168, 169, 171, - 176, 184, 185, 187, 155, 151, 152, 153, - 154, 150, 160, 162, 191, 149, 151, 152, - 158, 165, 172, 173, 178, 179, 188, 176, - 190, 132, 181, 187, 128, 131, 180, 188, - 189, 255, 130, 133, 170, 171, 179, 180, - 255, 130, 161, 170, 128, 129, 162, 165, - 166, 167, 168, 173, 167, 173, 166, 169, - 170, 174, 175, 177, 178, 179, 164, 171, - 172, 179, 180, 181, 182, 183, 161, 173, - 180, 144, 146, 148, 168, 178, 179, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 144, 176, 175, 177, - 191, 160, 191, 128, 130, 170, 175, 153, - 154, 153, 154, 155, 160, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 175, - 175, 178, 180, 189, 158, 159, 176, 177, - 130, 134, 139, 167, 163, 164, 165, 166, - 132, 133, 134, 159, 160, 177, 178, 255, - 166, 173, 135, 145, 146, 147, 131, 179, - 188, 128, 130, 180, 181, 182, 185, 186, - 255, 165, 129, 255, 169, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 131, 140, - 141, 188, 176, 178, 180, 183, 184, 190, - 191, 129, 171, 181, 182, 172, 173, 174, - 175, 165, 168, 172, 173, 163, 170, 172, - 184, 190, 158, 128, 143, 160, 175, 144, - 145, 150, 155, 157, 158, 159, 135, 139, - 141, 168, 171, 189, 160, 182, 186, 191, - 129, 131, 133, 134, 140, 143, 184, 186, - 165, 166, 128, 129, 130, 132, 133, 134, - 135, 136, 139, 140, 141, 144, 145, 146, - 147, 150, 151, 152, 153, 154, 156, 176, - 178, 129, 128, 130, 184, 255, 135, 190, - 130, 131, 175, 176, 178, 183, 184, 187, - 255, 172, 128, 130, 167, 180, 179, 130, - 128, 129, 179, 181, 182, 190, 191, 255, - 129, 137, 138, 140, 141, 255, 180, 190, - 172, 174, 175, 177, 178, 181, 182, 183, - 159, 160, 162, 163, 170, 188, 190, 191, - 128, 129, 130, 131, 128, 151, 129, 132, - 135, 136, 139, 141, 162, 163, 166, 172, - 176, 180, 181, 183, 184, 191, 133, 128, - 129, 130, 134, 176, 185, 189, 177, 178, - 179, 186, 187, 190, 191, 255, 129, 132, - 255, 175, 190, 176, 177, 178, 181, 184, - 187, 188, 255, 129, 155, 158, 255, 189, - 176, 178, 179, 186, 187, 190, 191, 255, - 129, 255, 172, 182, 171, 173, 174, 175, - 176, 183, 166, 157, 159, 160, 161, 162, - 171, 175, 190, 176, 182, 184, 191, 169, - 177, 180, 146, 167, 170, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 166, 173, 165, 169, 174, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 143, - 187, 191, 160, 128, 129, 132, 135, 133, - 134, 160, 255, 192, 255, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 
189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 191, 128, 156, 161, 190, 192, - 255, 136, 164, 175, 176, 255, 135, 138, - 139, 187, 188, 191, 192, 255, 0, 127, - 192, 255, 187, 191, 128, 190, 191, 128, - 190, 188, 128, 175, 176, 189, 190, 191, - 145, 155, 157, 159, 128, 191, 130, 135, - 128, 191, 189, 128, 191, 128, 129, 130, - 131, 132, 191, 178, 128, 191, 128, 159, - 160, 163, 164, 191, 133, 128, 191, 128, - 178, 179, 186, 187, 191, 135, 142, 143, - 145, 146, 149, 150, 153, 154, 155, 164, - 128, 191, 128, 165, 166, 191, 128, 255, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 131, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 
172, - 189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 170, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 
144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 130, 131, 135, - 191, 129, 134, 136, 190, 128, 159, 160, - 191, 128, 175, 176, 255, 10, 13, 127, - 194, 216, 219, 220, 224, 225, 226, 234, - 235, 236, 237, 239, 240, 243, 0, 31, - 128, 191, 192, 223, 227, 238, 241, 247, - 248, 255, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 239, 240, 243, 194, 216, - 219, 220, 224, 225, 226, 234, 235, 236, - 237, 239, 240, 243, 32, 126, 192, 223, - 227, 238, 241, 247, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 235, 236, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 237, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 237, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 235, 236, 237, 239, 240, - 243, 204, 
205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 237, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, -} - -var _graphclust_single_lengths []byte = []byte{ - 0, 1, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 2, 3, 2, 2, 2, 3, - 2, 2, 3, 3, 1, 2, 4, 2, - 2, 4, 4, 2, 0, 2, 0, 3, - 1, 0, 1, 21, 1, 0, 4, 0, - 0, 0, 1, 2, 0, 1, 1, 1, - 4, 0, 3, 1, 3, 2, 0, 3, - 0, 5, 2, 0, 0, 1, 0, 2, - 0, 0, 15, 0, 0, 0, 4, 0, - 0, 0, 3, 1, 0, 4, 1, 4, - 4, 3, 1, 0, 7, 5, 1, 1, - 0, 1, 0, 23, 1, 0, 1, 1, - 1, 1, 0, 2, 1, 3, 2, 0, - 1, 3, 1, 2, 0, 1, 0, 2, - 1, 2, 3, 4, 0, 0, 0, 1, - 0, 6, 2, 0, 0, 0, 0, 1, - 3, 0, 0, 0, 1, 0, 1, 4, - 0, 0, 0, 1, 1, 1, 4, 0, - 0, 0, 6, 0, 1, 1, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 6, - 0, 1, 0, 1, 0, 2, 0, 0, - 15, 0, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 0, 2, 1, 1, 0, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 0, 0, 0, 0, 1, - 0, 0, 1, 0, 1, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 4, 0, 0, 0, 0, 1, 0, - 6, 0, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 3, 0, 0, 0, 0, - 1, 1, 0, 1, 0, 1, 0, 0, - 0, 29, 0, 0, 0, 3, 2, 3, - 2, 2, 2, 3, 2, 2, 3, 3, - 1, 2, 4, 2, 2, 4, 4, 2, - 0, 2, 0, 3, 1, 0, 1, 21, - 1, 0, 4, 0, 0, 0, 1, 2, - 0, 1, 1, 1, 4, 0, 3, 1, - 3, 2, 0, 3, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 4, 0, 0, 0, 3, 1, - 0, 4, 1, 4, 4, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 1, 0, 1, 1, 1, 1, 0, 2, 
- 1, 3, 2, 0, 1, 3, 1, 2, - 0, 1, 0, 2, 1, 2, 3, 4, - 0, 0, 0, 1, 0, 6, 2, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 1, 0, 1, 4, 0, 0, 0, 1, - 1, 1, 4, 0, 0, 0, 6, 0, - 0, 0, 1, 1, 2, 1, 1, 5, - 0, 24, 0, 24, 0, 0, 23, 0, - 0, 1, 0, 2, 0, 0, 0, 28, - 0, 3, 23, 2, 0, 2, 2, 3, - 2, 2, 2, 0, 54, 54, 27, 1, - 0, 5, 2, 0, 1, 1, 0, 0, - 14, 0, 3, 2, 2, 3, 2, 2, - 2, 54, 54, 27, 1, 0, 2, 0, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 7, 1, 0, 1, 0, - 2, 3, 2, 1, 0, 1, 1, 3, - 0, 1, 3, 0, 1, 1, 2, 1, - 1, 5, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 24, 0, 24, 0, - 0, 23, 0, 0, 1, 0, 2, 0, - 0, 0, 28, 0, 3, 23, 2, 0, - 2, 2, 3, 2, 2, 2, 0, 54, - 54, 27, 1, 1, 5, 2, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0, 2, 1, 1, 0, 3, 1, - 0, 6, 5, 1, 1, 0, 1, 0, - 23, 0, 0, 0, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 4, - 0, 0, 0, 0, 1, 0, 6, 0, - 0, 0, 0, 0, 1, 3, 0, 0, - 0, 1, 4, 0, 0, 0, 6, 1, - 7, 3, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 29, - 0, 0, 0, 3, 2, 3, 2, 2, - 2, 3, 2, 2, 3, 3, 1, 2, - 4, 2, 2, 4, 4, 2, 0, 2, - 0, 3, 1, 0, 1, 21, 1, 0, - 4, 0, 0, 0, 1, 2, 0, 1, - 1, 1, 4, 0, 3, 1, 3, 2, - 0, 3, 0, 5, 2, 0, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 4, 0, 0, 0, 3, 1, 0, 4, - 1, 4, 4, 3, 1, 0, 7, 5, - 1, 1, 0, 1, 0, 23, 1, 0, - 1, 1, 1, 1, 0, 2, 1, 3, - 2, 0, 1, 3, 1, 2, 0, 1, - 0, 2, 1, 2, 3, 4, 0, 0, - 0, 1, 0, 6, 2, 0, 0, 0, - 0, 1, 3, 0, 0, 0, 1, 0, - 1, 4, 0, 0, 0, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 1, 1, 1, 4, 0, 0, 0, - 6, 2, 3, 2, 2, 2, 3, 2, - 2, 3, 3, 1, 2, 4, 2, 2, - 4, 4, 2, 0, 2, 0, 3, 1, - 0, 1, 21, 1, 0, 4, 0, 0, - 0, 1, 2, 0, 1, 1, 1, 4, - 0, 3, 1, 3, 2, 0, 3, 0, - 5, 2, 0, 0, 1, 0, 2, 0, - 0, 15, 0, 0, 0, 4, 0, 0, - 0, 3, 1, 0, 4, 1, 4, 4, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 1, 0, 1, 1, 1, - 1, 0, 2, 1, 3, 2, 0, 1, - 3, 1, 2, 0, 1, 0, 2, 1, - 2, 3, 4, 0, 0, 0, 1, 0, - 6, 2, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 1, 0, 1, 4, 0, - 0, 0, 1, 0, 0, 14, 0, 
3, - 2, 2, 3, 2, 2, 2, 54, 54, - 29, 1, 0, 0, 0, 0, 2, 1, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 7, 1, 0, 1, - 0, 2, 3, 2, 1, 0, 1, 1, - 3, 0, 1, 5, 0, 0, 17, 20, - 20, 20, 14, 20, 20, 20, 23, 21, - 21, 21, 20, 23, 20, 20, 20, 21, - 21, 21, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, -} - -var _graphclust_range_lengths []byte = []byte{ - 0, 0, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 5, 2, 6, 2, 8, 4, - 2, 5, 0, 3, 2, 4, 1, 6, - 2, 4, 4, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 4, 4, 1, 1, - 2, 2, 2, 2, 1, 1, 6, 2, - 5, 1, 3, 3, 4, 4, 4, 4, - 2, 0, 0, 1, 1, 0, 1, 0, - 1, 1, 0, 2, 1, 1, 2, 4, - 1, 2, 4, 1, 5, 0, 3, 2, - 1, 0, 0, 2, 0, 0, 0, 0, - 1, 4, 1, 0, 2, 1, 4, 2, - 0, 4, 3, 4, 2, 2, 6, 2, - 2, 4, 1, 4, 2, 4, 1, 3, - 3, 2, 2, 0, 1, 1, 1, 0, - 1, 0, 3, 3, 1, 2, 2, 2, - 0, 5, 1, 1, 0, 1, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 1, 0, 0, 1, 2, 2, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 0, - 1, 0, 1, 0, 1, 0, 1, 1, - 0, 2, 1, 1, 1, 2, 2, 1, - 1, 2, 2, 1, 1, 3, 2, 2, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 2, 2, 0, - 2, 2, 1, 1, 2, 6, 1, 1, - 1, 1, 2, 2, 1, 1, 1, 2, - 2, 0, 1, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 1, 1, 1, - 1, 2, 1, 1, 4, 1, 1, 1, - 1, 1, 4, 1, 2, 2, 5, 2, - 6, 2, 8, 4, 2, 5, 0, 3, - 2, 4, 1, 6, 2, 4, 4, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 4, 4, 1, 1, 2, 2, 2, 2, - 1, 1, 6, 2, 5, 1, 3, 3, - 4, 4, 4, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 2, 4, 1, 2, 4, 1, - 5, 0, 3, 2, 1, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 4, 2, 0, 4, 3, 4, - 2, 2, 6, 2, 2, 4, 1, 4, - 2, 4, 1, 3, 3, 2, 2, 0, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 0, 1, 1, 1, 0, 0, - 0, 0, 1, 1, 1, 0, 0, 1, - 2, 3, 1, 1, 1, 1, 1, 1, - 1, 0, 1, 0, 1, 1, 0, 1, - 1, 0, 1, 0, 1, 3, 1, 2, - 2, 1, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 1, 1, 2, 2, - 2, 1, 3, 2, 1, 1, 3, 1, - 3, 3, 1, 0, 0, 0, 0, 0, - 1, 1, 1, 2, 2, 4, 1, 1, - 2, 1, 1, 1, 3, 1, 2, 1, - 2, 1, 2, 0, 0, 1, 1, 5, - 9, 2, 1, 3, 5, 3, 1, 6, - 1, 1, 1, 
1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 1, 0, 1, - 1, 0, 1, 1, 0, 1, 0, 1, - 3, 1, 2, 2, 1, 0, 0, 1, - 0, 0, 0, 0, 0, 1, 0, 1, - 1, 2, 2, 1, 1, 5, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 1, 2, 2, 1, 1, 2, - 2, 1, 1, 3, 2, 2, 0, 0, - 2, 0, 0, 0, 0, 1, 4, 1, - 0, 2, 1, 2, 2, 0, 2, 2, - 1, 1, 2, 6, 1, 1, 1, 1, - 2, 2, 1, 1, 1, 2, 2, 0, - 1, 1, 1, 1, 0, 1, 0, 3, - 3, 1, 2, 2, 2, 0, 5, 1, - 1, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 1, - 4, 1, 2, 2, 5, 2, 6, 2, - 8, 4, 2, 5, 0, 3, 2, 4, - 1, 6, 2, 4, 4, 1, 1, 2, - 1, 2, 1, 4, 0, 0, 4, 4, - 1, 1, 2, 2, 2, 2, 1, 1, - 6, 2, 5, 1, 3, 3, 4, 4, - 4, 4, 2, 0, 0, 1, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 2, 4, 1, 2, 4, 1, 5, 0, - 3, 2, 1, 0, 0, 2, 0, 0, - 0, 0, 1, 4, 1, 0, 2, 1, - 4, 2, 0, 4, 3, 4, 2, 2, - 6, 2, 2, 4, 1, 4, 2, 4, - 1, 3, 3, 2, 2, 0, 1, 1, - 1, 0, 1, 0, 3, 3, 1, 2, - 2, 2, 0, 5, 1, 1, 0, 1, - 0, 1, 1, 1, 0, 0, 0, 3, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 1, 0, - 0, 5, 2, 6, 2, 8, 4, 2, - 5, 0, 3, 2, 4, 1, 6, 2, - 4, 4, 1, 1, 2, 1, 2, 1, - 4, 0, 0, 4, 4, 1, 1, 2, - 2, 2, 2, 1, 1, 6, 2, 5, - 1, 3, 3, 4, 4, 4, 4, 2, - 0, 0, 1, 1, 0, 1, 0, 1, - 1, 0, 2, 1, 1, 2, 4, 1, - 2, 4, 1, 5, 0, 3, 2, 1, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 4, 2, 0, - 4, 3, 4, 2, 2, 6, 2, 2, - 4, 1, 4, 2, 4, 1, 3, 3, - 2, 2, 0, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 0, 1, 1, - 1, 0, 1, 3, 1, 3, 3, 1, - 0, 0, 0, 0, 0, 1, 1, 1, - 3, 2, 4, 1, 0, 1, 1, 1, - 3, 1, 1, 1, 3, 1, 3, 1, - 3, 1, 2, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 
1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 5, 9, 2, 1, 3, 5, 3, 1, - 6, 1, 1, 2, 2, 2, 6, 0, - 0, 0, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_index_offsets []int16 = []int16{ - 0, 0, 2, 4, 6, 8, 11, 15, - 17, 20, 25, 28, 30, 32, 34, 63, - 68, 70, 73, 76, 80, 84, 90, 97, - 102, 106, 112, 115, 120, 123, 129, 135, - 138, 144, 146, 152, 155, 157, 161, 163, - 169, 171, 176, 178, 200, 203, 207, 212, - 214, 217, 220, 222, 225, 227, 230, 233, - 235, 241, 243, 246, 249, 252, 254, 256, - 262, 265, 271, 274, 281, 283, 285, 287, - 289, 291, 294, 296, 298, 314, 317, 319, - 321, 326, 329, 332, 334, 336, 339, 342, - 344, 348, 353, 357, 360, 364, 366, 369, - 377, 383, 385, 387, 389, 395, 397, 421, - 424, 426, 429, 432, 434, 437, 440, 443, - 445, 449, 457, 459, 461, 463, 465, 468, - 471, 473, 475, 477, 480, 483, 488, 490, - 492, 494, 496, 498, 500, 507, 511, 515, - 517, 520, 523, 527, 531, 537, 539, 541, - 545, 547, 549, 551, 553, 556, 560, 562, - 565, 570, 573, 575, 577, 579, 610, 615, - 617, 620, 626, 634, 640, 649, 654, 665, - 673, 678, 686, 690, 697, 701, 708, 714, - 723, 728, 737, 746, 750, 752, 757, 759, - 765, 768, 773, 775, 797, 803, 808, 814, - 816, 819, 822, 826, 831, 833, 836, 844, - 848, 858, 860, 867, 872, 880, 887, 892, - 900, 903, 909, 912, 914, 916, 918, 920, - 923, 925, 927, 943, 946, 948, 950, 957, - 962, 964, 967, 975, 978, 984, 989, 994, - 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, - 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, - 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, - 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, - 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, - 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, - 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, - 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, - 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, - 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, - 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, - 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, - 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, - 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, - 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, - 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, - 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, - 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, - 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, - 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, - 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, - 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, - 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, - 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, - 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, - 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, - 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, - 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, - 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, - 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, - 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, - 2037, 2039, 2044, 2046, 2052, 2055, 2060, 
2062, - 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, - 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, - 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, - 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, - 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, - 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, - 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, - 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, - 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, - 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, - 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, - 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, - 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, - 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, - 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, - 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, - 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, - 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, - 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, - 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, - 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, - 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, - 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, - 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, - 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, - 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, - 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, - 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, - 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, - 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, - 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, - 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, - 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, - 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, - 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, - 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, - 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, - 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, - 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, - 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, - 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, - 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, - 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, - 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, - 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, - 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, - 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, - 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, - 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, - 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, - 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, - 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, - 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, - 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, - 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, - 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, - 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, - 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, - 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, - 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, - 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, - 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, - 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, - 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, - 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, - 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, - 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, - 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, - 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, - 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, - 5034, 5038, 5044, 5051, 5056, 5060, 5066, 
5069, - 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, - 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, - 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, - 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, - 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, - 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, - 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, - 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, - 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, - 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, - 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, - 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, - 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, - 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, - 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, - 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, - 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, - 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, - 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, - 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, - 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, - 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, - 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, - 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, - 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, - 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, - 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, - 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, - 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, - 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, - 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, - 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, - 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, - 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, - 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, - 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, - 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, - 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, - 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, - 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, - 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, - 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, - 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, - 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, - 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, - 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, - 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, - 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, - 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, - 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, - 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, - 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, - 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, - 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, - 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, - 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, - 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, - 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, - 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, - 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, - 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, - 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, - 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, - 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, - 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, - 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, - 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, - 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, - 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, - 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, - 7490, 7496, 7498, 7500, 7502, 7504, 7506, 
7512, - 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, - 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, - 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, - 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, - 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, - 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, - 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, - 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, - 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, - 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, - 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, - 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, - 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, - 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, - 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, - 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, - 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, - 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, - 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, - 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, - 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, - 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, - 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, - 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, - 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, - 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, - 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, - 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, - 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, - 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, - 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, - 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, - 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, - 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, - 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, - 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, - 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, - 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, - 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, - 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, - 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, - 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, - 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, - 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, - 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, - 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, - 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, - 9718, 9739, 9760, 9781, -} - -var _graphclust_indicies []int16 = []int16{ - 0, 1, 3, 2, 2, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 2, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 2, 2, 3, 3, 2, - 3, 2, 4, 5, 6, 7, 8, 10, - 11, 12, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 9, 13, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 2, 2, 3, 2, 2, 2, 3, - 3, 3, 3, 2, 2, 2, 2, 2, - 2, 3, 2, 2, 2, 2, 2, 2, - 3, 2, 2, 2, 2, 3, 3, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 3, 2, - 3, 3, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 2, 3, - 3, 2, 2, 2, 2, 2, 2, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 2, - 3, 2, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 3, 3, 2, 3, 2, 2, 2, - 3, 3, 2, 3, 3, 2, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 2, 2, 2, - 3, 3, 3, 2, 3, 2, 3, 2, - 3, 3, 3, 3, 3, 2, 3, 3, - 2, 53, 54, 55, 56, 57, 2, 3, - 58, 2, 53, 54, 59, 55, 56, 57, - 2, 3, 2, 3, 2, 3, 2, 3, - 2, 3, 2, 60, 61, 2, 3, 2, - 3, 2, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, - 76, 2, 3, 3, 2, 3, 2, 3, - 2, 3, 3, 
3, 3, 2, 3, 3, - 2, 2, 2, 3, 3, 2, 3, 2, - 3, 3, 2, 2, 2, 3, 3, 2, - 3, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 3, 2, 3, 3, 2, - 77, 78, 63, 2, 3, 2, 3, 3, - 2, 79, 80, 81, 82, 83, 84, 85, - 2, 86, 87, 88, 89, 90, 2, 3, - 2, 3, 2, 3, 2, 3, 3, 3, - 3, 3, 2, 3, 2, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 104, 108, - 109, 110, 111, 112, 2, 3, 3, 2, - 2, 3, 2, 2, 3, 3, 3, 2, - 3, 2, 3, 3, 2, 2, 2, 3, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 3, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 3, 2, 2, - 3, 3, 3, 2, 2, 2, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 2, - 3, 3, 2, 113, 114, 115, 116, 2, - 3, 2, 3, 2, 3, 2, 3, 2, - 117, 2, 3, 2, 118, 119, 120, 121, - 122, 123, 2, 3, 3, 3, 2, 2, - 2, 2, 3, 3, 2, 3, 3, 2, - 2, 2, 3, 3, 3, 3, 2, 124, - 125, 126, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 127, 128, 129, - 2, 130, 2, 2, 130, 2, 130, 130, - 2, 130, 130, 2, 130, 130, 130, 2, - 130, 2, 130, 130, 2, 130, 130, 130, - 130, 2, 130, 130, 2, 2, 130, 130, - 2, 130, 2, 131, 132, 133, 134, 135, - 136, 137, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 150, 22, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 138, 2, 130, 130, 130, 130, 2, 130, - 2, 130, 130, 2, 3, 3, 2, 2, - 3, 130, 130, 2, 130, 130, 2, 130, - 2, 3, 130, 130, 130, 3, 3, 2, - 130, 130, 130, 2, 2, 2, 130, 2, - 3, 3, 130, 130, 3, 2, 130, 130, - 130, 2, 130, 2, 130, 2, 130, 2, - 3, 2, 2, 130, 130, 2, 130, 2, - 3, 130, 130, 3, 130, 2, 3, 130, - 130, 3, 3, 130, 130, 2, 130, 130, - 3, 2, 130, 130, 130, 3, 3, 3, - 2, 130, 3, 130, 2, 2, 2, 3, - 2, 2, 2, 130, 130, 130, 3, 130, - 3, 2, 130, 130, 3, 3, 3, 130, - 130, 130, 2, 130, 130, 3, 3, 2, - 2, 2, 130, 130, 130, 2, 130, 2, - 3, 130, 130, 130, 130, 3, 130, 3, - 3, 2, 130, 3, 130, 2, 130, 2, - 130, 3, 130, 130, 2, 130, 2, 130, - 130, 130, 130, 3, 2, 3, 130, 2, - 130, 130, 130, 130, 2, 130, 2, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 2, 3, 130, 130, - 3, 130, 2, 3, 130, 130, 130, 2, - 130, 3, 130, 130, 130, 2, 130, 2, - 130, 130, 2, 130, 130, 2, 3, 130, - 3, 2, 130, 130, 130, 2, 3, 130, - 2, 130, 130, 2, 130, 130, 3, 130, - 3, 3, 130, 2, 130, 130, 3, 2, - 130, 130, 130, 130, 3, 130, 130, 3, - 130, 2, 130, 2, 3, 3, 3, 130, - 130, 3, 2, 130, 2, 130, 2, 3, - 3, 3, 3, 130, 130, 3, 130, 2, - 3, 130, 130, 3, 130, 3, 2, 3, - 130, 3, 130, 2, 3, 130, 130, 130, - 130, 3, 130, 2, 130, 130, 2, 181, - 182, 183, 184, 185, 2, 130, 58, 2, - 130, 2, 130, 2, 130, 2, 130, 2, - 186, 187, 2, 130, 2, 130, 2, 188, - 189, 190, 191, 66, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 2, 130, - 130, 2, 130, 2, 130, 2, 130, 130, - 130, 3, 3, 130, 2, 130, 2, 130, - 2, 3, 130, 2, 130, 3, 2, 3, - 130, 130, 130, 3, 130, 3, 2, 130, - 2, 3, 130, 3, 130, 3, 130, 2, - 130, 130, 3, 130, 2, 130, 130, 130, - 130, 2, 130, 3, 3, 130, 130, 3, - 2, 130, 130, 3, 130, 3, 2, 202, - 203, 189, 2, 130, 2, 130, 130, 2, - 204, 205, 206, 207, 208, 209, 210, 2, - 211, 212, 213, 214, 215, 2, 130, 2, - 130, 2, 130, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 2, 130, 3, 130, 2, - 2, 130, 3, 2, 3, 3, 2, 130, - 3, 130, 130, 2, 130, 2, 3, 130, - 3, 130, 3, 2, 2, 130, 2, 3, - 130, 130, 3, 130, 3, 130, 2, 130, - 3, 130, 2, 130, 130, 3, 130, 3, - 2, 130, 130, 3, 3, 3, 3, 130, - 130, 2, 3, 130, 2, 3, 3, 130, - 2, 130, 3, 130, 3, 130, 3, 130, - 2, 3, 2, 130, 130, 3, 3, 130, - 3, 130, 2, 2, 2, 130, 130, 3, - 130, 3, 130, 2, 2, 130, 3, 3, - 130, 3, 130, 2, 3, 130, 3, 130, 
- 2, 3, 3, 130, 130, 2, 3, 3, - 3, 130, 130, 2, 239, 240, 115, 241, - 2, 130, 2, 130, 2, 130, 2, 242, - 2, 130, 2, 243, 244, 245, 246, 247, - 248, 2, 3, 3, 130, 130, 130, 2, - 2, 2, 2, 130, 130, 2, 130, 130, - 2, 2, 2, 130, 130, 130, 130, 2, - 249, 250, 251, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 130, 2, 252, 2, - 3, 2, 253, 2, 254, 255, 256, 258, - 257, 2, 130, 2, 2, 130, 130, 3, - 2, 3, 2, 259, 2, 260, 261, 262, - 264, 263, 2, 3, 2, 2, 3, 3, - 79, 80, 81, 82, 83, 84, 2, 3, - 1, 265, 265, 3, 1, 265, 266, 3, - 1, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 267, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 267, 267, 268, 268, 267, 268, - 267, 269, 270, 271, 272, 273, 275, 276, - 277, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, - 294, 295, 296, 274, 278, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 267, 267, 268, 267, 267, 267, 268, 268, - 268, 268, 267, 267, 267, 267, 267, 267, - 268, 267, 267, 267, 267, 267, 267, 268, - 267, 267, 267, 267, 268, 268, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 268, 267, 268, - 268, 267, 267, 267, 267, 267, 267, 268, - 268, 268, 268, 268, 268, 267, 268, 268, - 267, 267, 267, 267, 267, 267, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 267, 268, - 267, 297, 298, 299, 300, 301, 302, 303, - 304, 305, 306, 307, 308, 309, 310, 311, - 312, 313, 314, 315, 316, 317, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 268, 268, 267, 268, 267, 267, 267, 268, - 268, 267, 268, 268, 267, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 267, 267, 267, 268, - 268, 268, 267, 268, 267, 268, 267, 268, - 268, 268, 268, 268, 267, 268, 268, 267, - 318, 319, 320, 321, 322, 267, 268, 323, - 267, 318, 319, 324, 320, 321, 322, 267, - 268, 267, 268, 267, 268, 267, 268, 267, - 268, 267, 325, 326, 267, 268, 267, 268, - 267, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 267, 268, 268, 267, 268, 267, 268, 267, - 268, 268, 268, 268, 267, 268, 268, 267, - 267, 267, 268, 268, 267, 268, 267, 268, - 268, 267, 267, 267, 268, 268, 267, 268, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 268, 267, 268, 268, 267, 342, - 343, 328, 267, 268, 267, 268, 268, 267, - 344, 345, 346, 347, 348, 349, 350, 267, - 351, 352, 353, 354, 355, 267, 268, 267, - 268, 267, 268, 267, 268, 268, 268, 268, - 268, 267, 268, 267, 356, 357, 358, 359, - 360, 361, 362, 363, 364, 365, 366, 367, - 368, 369, 370, 371, 372, 369, 373, 374, - 375, 376, 377, 267, 268, 268, 267, 267, - 268, 267, 267, 268, 268, 268, 267, 268, - 267, 268, 268, 267, 267, 267, 268, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 268, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 268, 267, 267, 268, - 268, 268, 267, 267, 267, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 378, 379, 380, 381, 267, 268, - 267, 268, 267, 268, 267, 268, 267, 382, - 267, 268, 267, 383, 384, 385, 386, 387, - 388, 267, 268, 268, 268, 267, 267, 267, - 267, 268, 268, 267, 268, 268, 267, 267, - 267, 268, 268, 268, 268, 267, 389, 390, - 391, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 392, 393, 394, 267, - 395, 267, 395, 267, 267, 395, 395, 267, - 395, 395, 267, 395, 395, 395, 267, 395, - 267, 395, 395, 267, 395, 395, 395, 395, - 267, 395, 395, 267, 267, 395, 395, 267, - 395, 267, 396, 
397, 398, 399, 400, 401, - 402, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 287, 416, 417, - 418, 419, 420, 421, 422, 423, 424, 403, - 267, 395, 395, 395, 395, 267, 395, 267, - 395, 395, 267, 268, 268, 267, 267, 268, - 395, 395, 267, 395, 395, 267, 395, 267, - 268, 395, 395, 395, 268, 268, 267, 395, - 395, 395, 267, 267, 267, 395, 267, 268, - 268, 395, 395, 268, 267, 395, 395, 395, - 267, 395, 267, 395, 267, 395, 267, 268, - 267, 267, 395, 395, 267, 395, 267, 268, - 395, 395, 268, 395, 267, 268, 395, 395, - 268, 268, 395, 395, 267, 395, 395, 268, - 267, 395, 395, 395, 268, 268, 268, 267, - 395, 268, 395, 267, 267, 267, 268, 267, - 267, 267, 395, 395, 395, 268, 395, 268, - 267, 395, 395, 268, 268, 268, 395, 395, - 395, 267, 395, 395, 268, 268, 267, 267, - 267, 395, 395, 395, 267, 395, 267, 268, - 395, 395, 395, 395, 268, 395, 268, 268, - 267, 395, 268, 395, 267, 395, 267, 395, - 268, 395, 395, 267, 395, 267, 395, 395, - 395, 395, 268, 267, 268, 395, 267, 395, - 395, 395, 395, 267, 395, 267, 425, 426, - 427, 428, 429, 430, 431, 432, 433, 434, - 435, 436, 437, 438, 439, 440, 441, 442, - 443, 444, 445, 267, 268, 395, 395, 268, - 395, 267, 268, 395, 395, 395, 267, 395, - 268, 395, 395, 395, 267, 395, 267, 395, - 395, 267, 395, 395, 267, 268, 395, 268, - 267, 395, 395, 395, 267, 268, 395, 267, - 395, 395, 267, 395, 395, 268, 395, 268, - 268, 395, 267, 395, 395, 268, 267, 395, - 395, 395, 395, 268, 395, 395, 268, 395, - 267, 395, 267, 268, 268, 268, 395, 395, - 268, 267, 395, 267, 395, 267, 268, 268, - 268, 268, 395, 395, 268, 395, 267, 268, - 395, 395, 268, 395, 268, 267, 268, 395, - 268, 395, 267, 268, 395, 395, 395, 395, - 268, 395, 267, 395, 395, 267, 446, 447, - 448, 449, 450, 267, 395, 323, 267, 395, - 267, 395, 267, 395, 267, 395, 267, 451, - 452, 267, 395, 267, 395, 267, 453, 454, - 455, 456, 331, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 267, 395, 395, - 267, 395, 267, 395, 267, 395, 395, 395, - 268, 268, 395, 267, 395, 267, 395, 267, - 268, 395, 267, 395, 268, 267, 268, 395, - 395, 395, 268, 395, 268, 267, 395, 267, - 268, 395, 268, 395, 268, 395, 267, 395, - 395, 268, 395, 267, 395, 395, 395, 395, - 267, 395, 268, 268, 395, 395, 268, 267, - 395, 395, 268, 395, 268, 267, 467, 468, - 454, 267, 395, 267, 395, 395, 267, 469, - 470, 471, 472, 473, 474, 475, 267, 476, - 477, 478, 479, 480, 267, 395, 267, 395, - 267, 395, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 481, 482, 483, 484, 485, - 486, 487, 488, 489, 490, 491, 492, 493, - 494, 495, 496, 497, 498, 499, 500, 501, - 502, 503, 267, 395, 268, 395, 267, 267, - 395, 268, 267, 268, 268, 267, 395, 268, - 395, 395, 267, 395, 267, 268, 395, 268, - 395, 268, 267, 267, 395, 267, 268, 395, - 395, 268, 395, 268, 395, 267, 395, 268, - 395, 267, 395, 395, 268, 395, 268, 267, - 395, 395, 268, 268, 268, 268, 395, 395, - 267, 268, 395, 267, 268, 268, 395, 267, - 395, 268, 395, 268, 395, 268, 395, 267, - 268, 267, 395, 395, 268, 268, 395, 268, - 395, 267, 267, 267, 395, 395, 268, 395, - 268, 395, 267, 267, 395, 268, 268, 395, - 268, 395, 267, 268, 395, 268, 395, 267, - 268, 268, 395, 395, 267, 268, 268, 268, - 395, 395, 267, 504, 505, 380, 506, 267, - 395, 267, 395, 267, 395, 267, 507, 267, - 395, 267, 508, 509, 510, 511, 512, 513, - 267, 268, 268, 395, 395, 395, 267, 267, - 267, 267, 395, 395, 267, 395, 395, 267, - 267, 267, 395, 395, 395, 395, 267, 514, - 515, 516, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 395, 267, 517, 267, 268, - 267, 518, 267, 519, 520, 521, 523, 522, - 267, 395, 267, 267, 395, 395, 268, 267, - 
268, 267, 524, 267, 525, 526, 527, 529, - 528, 267, 268, 267, 267, 268, 268, 344, - 345, 346, 347, 348, 349, 267, 268, 267, - 268, 268, 267, 266, 268, 268, 267, 266, - 268, 267, 266, 268, 267, 531, 532, 530, - 267, 266, 268, 267, 266, 268, 267, 533, - 534, 535, 536, 537, 530, 267, 538, 267, - 297, 298, 299, 533, 534, 539, 300, 301, - 302, 303, 304, 305, 306, 307, 308, 309, - 310, 311, 312, 313, 314, 315, 316, 317, - 267, 540, 538, 297, 298, 299, 541, 535, - 536, 300, 301, 302, 303, 304, 305, 306, - 307, 308, 309, 310, 311, 312, 313, 314, - 315, 316, 317, 267, 540, 267, 542, 540, - 297, 298, 299, 543, 536, 300, 301, 302, - 303, 304, 305, 306, 307, 308, 309, 310, - 311, 312, 313, 314, 315, 316, 317, 267, - 542, 267, 267, 542, 544, 267, 542, 267, - 545, 546, 267, 540, 267, 267, 542, 267, - 540, 267, 540, 327, 328, 329, 330, 331, - 332, 333, 547, 335, 336, 337, 338, 339, - 340, 341, 549, 550, 551, 552, 553, 554, - 549, 550, 551, 552, 553, 554, 549, 548, - 555, 267, 268, 538, 267, 556, 556, 556, - 542, 267, 297, 298, 299, 541, 539, 300, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 317, 267, 545, 557, 267, 267, 540, 556, - 556, 542, 556, 556, 542, 556, 556, 556, - 542, 556, 556, 542, 556, 556, 542, 556, - 556, 267, 542, 542, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 550, 555, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 551, 555, 553, 554, 548, 549, - 550, 551, 553, 554, 548, 549, 550, 551, - 553, 554, 548, 549, 550, 551, 553, 554, - 548, 549, 550, 551, 553, 558, 557, 552, - 267, 555, 556, 267, 540, 542, 268, 268, - 267, 559, 560, 561, 562, 563, 530, 267, - 268, 323, 268, 268, 268, 267, 268, 268, - 267, 395, 268, 267, 395, 268, 267, 268, - 395, 268, 267, 530, 267, 564, 566, 567, - 568, 569, 570, 571, 566, 567, 568, 569, - 570, 571, 566, 530, 565, 555, 267, 268, - 538, 268, 267, 540, 540, 540, 542, 267, - 540, 540, 542, 540, 540, 542, 540, 540, - 540, 542, 540, 540, 542, 540, 540, 542, - 540, 540, 267, 542, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 567, 555, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 568, 555, 570, 571, 565, 566, - 567, 568, 570, 571, 565, 566, 567, 568, - 570, 571, 565, 566, 567, 568, 570, 571, - 565, 566, 567, 568, 570, 572, 573, 569, - 267, 555, 540, 268, 540, 542, 268, 542, - 268, 267, 540, 574, 575, 530, 267, 268, - 267, 268, 268, 268, 267, 577, 578, 579, - 580, 576, 267, 581, 582, 530, 267, 266, - 268, 267, 268, 266, 268, 267, 583, 530, - 267, 268, 268, 267, 584, 530, 267, 268, - 268, 267, 585, 586, 587, 588, 589, 590, - 591, 592, 593, 594, 595, 530, 267, 268, - 596, 267, 344, 345, 346, 
347, 348, 349, - 597, 267, 598, 267, 268, 267, 395, 268, - 267, 268, 395, 268, 395, 268, 267, 395, - 395, 268, 395, 268, 395, 268, 395, 268, - 395, 268, 267, 268, 268, 395, 395, 268, - 267, 395, 395, 268, 267, 395, 268, 395, - 268, 267, 268, 395, 268, 395, 268, 267, - 395, 268, 395, 268, 267, 395, 268, 267, - 395, 395, 268, 268, 395, 268, 395, 268, - 395, 267, 576, 267, 599, 576, 267, 322, - 530, 600, 530, 267, 268, 267, 266, 3, - 1, 266, 3, 1, 602, 603, 601, 1, - 266, 3, 1, 266, 3, 1, 604, 605, - 606, 607, 608, 601, 1, 609, 610, 612, - 611, 611, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 611, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 611, 611, 612, 612, 611, 612, 611, 613, - 614, 615, 616, 617, 619, 620, 621, 623, - 624, 625, 626, 627, 628, 629, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, - 640, 618, 622, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 611, 611, - 612, 611, 611, 611, 612, 612, 612, 612, - 611, 611, 611, 611, 611, 611, 612, 611, - 611, 611, 611, 611, 611, 612, 611, 611, - 611, 611, 612, 612, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 612, 611, 612, 612, 611, - 611, 611, 611, 611, 611, 612, 612, 612, - 612, 612, 612, 611, 612, 612, 611, 611, - 611, 611, 611, 611, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 611, 612, 611, 641, - 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 612, 612, - 611, 612, 611, 611, 611, 612, 612, 611, - 612, 612, 611, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 611, 611, 611, 612, 612, 612, - 611, 612, 611, 612, 611, 612, 612, 612, - 612, 612, 611, 612, 612, 611, 662, 663, - 664, 665, 666, 611, 612, 667, 611, 662, - 663, 668, 664, 665, 666, 611, 612, 611, - 612, 611, 612, 611, 612, 611, 612, 611, - 669, 670, 611, 612, 611, 612, 611, 671, - 672, 673, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 683, 684, 685, 611, 612, - 612, 611, 612, 611, 612, 611, 612, 612, - 612, 612, 611, 612, 612, 611, 611, 611, - 612, 612, 611, 612, 611, 612, 612, 611, - 611, 611, 612, 612, 611, 612, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 612, 611, 612, 612, 611, 686, 687, 672, - 611, 612, 611, 612, 612, 611, 688, 689, - 690, 691, 692, 693, 694, 611, 695, 696, - 697, 698, 699, 611, 612, 611, 612, 611, - 612, 611, 612, 612, 612, 612, 612, 611, - 612, 611, 700, 701, 702, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 713, - 714, 715, 716, 713, 717, 718, 719, 720, - 721, 611, 612, 612, 611, 611, 612, 611, - 611, 612, 612, 612, 611, 612, 611, 612, - 612, 611, 611, 611, 612, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 612, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 612, 611, 611, 612, 612, 612, - 611, 611, 611, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 611, 612, 612, 611, - 722, 723, 724, 725, 611, 612, 611, 612, - 611, 612, 611, 612, 611, 726, 611, 612, - 611, 727, 728, 729, 730, 731, 732, 611, - 612, 612, 612, 611, 611, 611, 611, 612, - 612, 611, 612, 612, 611, 611, 611, 612, - 612, 612, 612, 611, 733, 734, 735, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 736, 737, 738, 611, 739, 611, - 739, 611, 611, 739, 739, 611, 739, 739, - 611, 739, 739, 739, 611, 739, 611, 739, - 739, 611, 739, 739, 739, 739, 611, 739, - 739, 611, 611, 739, 739, 611, 
739, 611, - 740, 741, 742, 743, 744, 745, 746, 748, - 749, 750, 751, 752, 753, 754, 755, 756, - 757, 758, 759, 631, 760, 761, 762, 763, - 764, 765, 766, 767, 768, 747, 611, 739, - 739, 739, 739, 611, 739, 611, 739, 739, - 611, 612, 612, 611, 611, 612, 739, 739, - 611, 739, 739, 611, 739, 611, 612, 739, - 739, 739, 612, 612, 611, 739, 739, 739, - 611, 611, 611, 739, 611, 612, 612, 739, - 739, 612, 611, 739, 739, 739, 611, 739, - 611, 739, 611, 739, 611, 612, 611, 611, - 739, 739, 611, 739, 611, 612, 739, 739, - 612, 739, 611, 612, 739, 739, 612, 612, - 739, 739, 611, 739, 739, 612, 611, 739, - 739, 739, 612, 612, 612, 611, 739, 612, - 739, 611, 611, 611, 612, 611, 611, 611, - 739, 739, 739, 612, 739, 612, 611, 739, - 739, 612, 612, 612, 739, 739, 739, 611, - 739, 739, 612, 612, 611, 611, 611, 739, - 739, 739, 611, 739, 611, 612, 739, 739, - 739, 739, 612, 739, 612, 612, 611, 739, - 612, 739, 611, 739, 611, 739, 612, 739, - 739, 611, 739, 611, 739, 739, 739, 739, - 612, 611, 612, 739, 611, 739, 739, 739, - 739, 611, 739, 611, 769, 770, 771, 772, - 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 785, 786, 787, 788, - 789, 611, 612, 739, 739, 612, 739, 611, - 612, 739, 739, 739, 611, 739, 612, 739, - 739, 739, 611, 739, 611, 739, 739, 611, - 739, 739, 611, 612, 739, 612, 611, 739, - 739, 739, 611, 612, 739, 611, 739, 739, - 611, 739, 739, 612, 739, 612, 612, 739, - 611, 739, 739, 612, 611, 739, 739, 739, - 739, 612, 739, 739, 612, 739, 611, 739, - 611, 612, 612, 612, 739, 739, 612, 611, - 739, 611, 739, 611, 612, 612, 612, 612, - 739, 739, 612, 739, 611, 612, 739, 739, - 612, 739, 612, 611, 612, 739, 612, 739, - 611, 612, 739, 739, 739, 739, 612, 739, - 611, 739, 739, 611, 790, 791, 792, 793, - 794, 611, 739, 667, 611, 739, 611, 739, - 611, 739, 611, 739, 611, 795, 796, 611, - 739, 611, 739, 611, 797, 798, 799, 800, - 675, 801, 802, 803, 804, 805, 806, 807, - 808, 809, 810, 611, 739, 739, 611, 739, - 611, 739, 611, 739, 739, 739, 612, 612, - 739, 611, 739, 611, 739, 611, 612, 739, - 611, 739, 612, 611, 612, 739, 739, 739, - 612, 739, 612, 611, 739, 611, 612, 739, - 612, 739, 612, 739, 611, 739, 739, 612, - 739, 611, 739, 739, 739, 739, 611, 739, - 612, 612, 739, 739, 612, 611, 739, 739, - 612, 739, 612, 611, 811, 812, 798, 611, - 739, 611, 739, 739, 611, 813, 814, 815, - 816, 817, 818, 819, 611, 820, 821, 822, - 823, 824, 611, 739, 611, 739, 611, 739, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 825, 826, 827, 828, 829, 830, 831, - 832, 833, 834, 835, 836, 837, 838, 839, - 840, 841, 842, 843, 844, 845, 846, 847, - 611, 739, 612, 739, 611, 611, 739, 612, - 611, 612, 612, 611, 739, 612, 739, 739, - 611, 739, 611, 612, 739, 612, 739, 612, - 611, 611, 739, 611, 612, 739, 739, 612, - 739, 612, 739, 611, 739, 612, 739, 611, - 739, 739, 612, 739, 612, 611, 739, 739, - 612, 612, 612, 612, 739, 739, 611, 612, - 739, 611, 612, 612, 739, 611, 739, 612, - 739, 612, 739, 612, 739, 611, 612, 611, - 739, 739, 612, 612, 739, 612, 739, 611, - 611, 611, 739, 739, 612, 739, 612, 739, - 611, 611, 739, 612, 612, 739, 612, 739, - 611, 612, 739, 612, 739, 611, 612, 612, - 739, 739, 611, 612, 612, 612, 739, 739, - 611, 848, 849, 724, 850, 611, 739, 611, - 739, 611, 739, 611, 851, 611, 739, 611, - 852, 853, 854, 855, 856, 857, 611, 612, - 612, 739, 739, 739, 611, 611, 611, 611, - 739, 739, 611, 739, 739, 611, 611, 611, - 739, 739, 739, 739, 611, 858, 859, 860, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 739, 611, 861, 611, 612, 611, 862, - 611, 863, 864, 865, 867, 866, 611, 739, - 611, 611, 739, 
739, 612, 611, 612, 611, - 868, 611, 869, 870, 871, 873, 872, 611, - 612, 611, 611, 612, 612, 688, 689, 690, - 691, 692, 693, 611, 641, 642, 643, 604, - 605, 874, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 875, 610, 641, - 642, 643, 876, 606, 607, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 660, 661, 611, - 875, 611, 877, 875, 641, 642, 643, 878, - 607, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 659, 660, 661, 611, 877, 611, 609, 877, - 879, 611, 877, 611, 880, 881, 611, 875, - 611, 611, 877, 611, 875, 611, 875, 671, - 672, 673, 674, 675, 676, 677, 882, 679, - 680, 681, 682, 683, 684, 685, 884, 885, - 886, 887, 888, 889, 884, 885, 886, 887, - 888, 889, 884, 883, 890, 611, 612, 610, - 611, 891, 891, 891, 877, 611, 641, 642, - 643, 876, 874, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 659, 660, 661, 611, 880, 892, - 611, 611, 875, 891, 891, 877, 891, 891, - 877, 891, 891, 891, 877, 891, 891, 877, - 891, 891, 877, 891, 891, 611, 877, 877, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 885, 890, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 886, 890, - 888, 889, 883, 884, 885, 886, 888, 889, - 883, 884, 885, 886, 888, 889, 883, 884, - 885, 886, 888, 889, 883, 884, 885, 886, - 888, 893, 892, 887, 611, 890, 891, 611, - 875, 877, 265, 3, 1, 894, 895, 896, - 897, 898, 601, 1, 265, 899, 3, 265, - 3, 265, 3, 1, 901, 900, 900, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 900, 901, 901, 900, 901, 901, - 901, 901, 900, 901, 901, 900, 900, 901, - 901, 900, 901, 900, 902, 903, 904, 905, - 906, 908, 909, 910, 912, 913, 914, 915, - 916, 917, 918, 919, 920, 921, 922, 923, - 924, 925, 926, 927, 928, 929, 907, 911, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 900, 900, 901, 900, 900, - 900, 901, 901, 901, 901, 900, 900, 900, - 900, 900, 900, 901, 900, 900, 900, 900, - 900, 900, 901, 900, 900, 900, 900, 901, - 901, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 900, 900, 900, 900, - 900, 900, 901, 901, 901, 901, 901, 901, - 900, 901, 901, 900, 900, 900, 900, 900, - 900, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 900, 901, 900, 930, 931, 932, 933, - 934, 935, 936, 937, 938, 939, 940, 941, - 942, 943, 944, 945, 946, 947, 948, 949, - 950, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 901, 901, 900, 901, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 901, 900, - 901, 901, 900, 951, 952, 953, 954, 955, - 900, 901, 899, 900, 901, 900, 901, 900, - 901, 900, 901, 900, 956, 957, 900, 901, - 900, 901, 900, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 
900, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 900, 901, - 901, 900, 900, 900, 901, 901, 900, 901, - 900, 901, 901, 900, 900, 900, 901, 901, - 900, 901, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 901, 900, 901, 901, - 900, 973, 974, 959, 900, 901, 900, 901, - 901, 900, 975, 976, 977, 978, 979, 980, - 900, 981, 982, 983, 984, 985, 900, 901, - 900, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 999, 1000, 1001, 1002, 999, 1003, - 1004, 1005, 1006, 1007, 900, 901, 901, 900, - 900, 901, 900, 900, 901, 901, 901, 900, - 901, 900, 901, 901, 900, 900, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 901, 900, 900, - 901, 901, 901, 900, 900, 900, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 900, - 901, 901, 900, 1008, 1009, 1010, 1011, 900, - 901, 900, 901, 900, 901, 900, 901, 900, - 1012, 900, 901, 900, 1013, 1014, 1015, 1016, - 1017, 1018, 900, 901, 901, 901, 900, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 901, 900, 1019, - 1020, 1021, 900, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 1022, 900, 1023, - 1024, 1025, 1027, 1026, 900, 901, 900, 900, - 901, 901, 951, 952, 1028, 953, 954, 955, - 900, 901, 900, 975, 976, 977, 978, 979, - 980, 1029, 900, 1030, 1031, 1032, 900, 1033, - 900, 1033, 900, 900, 1033, 1033, 900, 1033, - 1033, 900, 1033, 1033, 1033, 900, 1033, 900, - 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 1033, 900, 900, 1033, 1033, 900, 1033, - 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, - 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, - 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, - 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, - 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, - 1033, 900, 901, 901, 900, 900, 901, 1033, - 1033, 900, 1033, 1033, 900, 1033, 900, 901, - 1033, 1033, 1033, 901, 901, 900, 1033, 1033, - 1033, 900, 900, 900, 1033, 900, 901, 901, - 1033, 1033, 901, 900, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 900, 901, 900, - 900, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 901, 1033, 900, 901, 1033, 1033, 901, - 901, 1033, 1033, 900, 1033, 1033, 901, 900, - 1033, 1033, 1033, 901, 901, 901, 900, 1033, - 901, 1033, 900, 900, 900, 901, 900, 900, - 900, 1033, 1033, 1033, 901, 1033, 901, 900, - 1033, 1033, 901, 901, 901, 1033, 1033, 1033, - 900, 1033, 1033, 901, 901, 900, 900, 900, - 1033, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 1033, 1033, 901, 1033, 901, 901, 900, - 1033, 901, 1033, 900, 1033, 900, 1033, 901, - 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, - 1033, 901, 900, 901, 1033, 900, 1033, 1033, - 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, - 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, - 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, - 1082, 1083, 900, 901, 1033, 1033, 901, 1033, - 900, 901, 1033, 1033, 1033, 900, 1033, 901, - 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, - 900, 1033, 1033, 900, 901, 1033, 901, 900, - 1033, 1033, 1033, 900, 901, 1033, 900, 1033, - 1033, 900, 1033, 1033, 901, 1033, 901, 901, - 1033, 900, 1033, 1033, 901, 900, 1033, 1033, - 1033, 1033, 901, 1033, 1033, 901, 1033, 900, - 1033, 900, 901, 901, 901, 1033, 1033, 901, - 900, 1033, 900, 1033, 900, 901, 901, 901, - 901, 1033, 1033, 901, 1033, 900, 901, 1033, - 1033, 901, 1033, 901, 900, 901, 1033, 901, - 1033, 900, 901, 1033, 1033, 1033, 1033, 901, - 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, - 1087, 1088, 900, 1033, 899, 900, 1033, 900, - 
1033, 900, 1033, 900, 1033, 900, 1089, 1090, - 900, 1033, 900, 1033, 900, 1091, 1092, 1093, - 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, - 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 1033, 1033, 901, - 901, 1033, 900, 1033, 900, 1033, 900, 901, - 1033, 900, 1033, 901, 900, 901, 1033, 1033, - 1033, 901, 1033, 901, 900, 1033, 900, 901, - 1033, 901, 1033, 901, 1033, 900, 1033, 1033, - 901, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 901, 901, 1033, 1033, 901, 900, 1033, - 1033, 901, 1033, 901, 900, 1105, 1106, 1092, - 900, 1033, 900, 1033, 1033, 900, 1107, 1108, - 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, - 1116, 1117, 1118, 900, 1033, 900, 1033, 900, - 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, - 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, - 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, - 1141, 900, 1033, 901, 1033, 900, 900, 1033, - 901, 900, 901, 901, 900, 1033, 901, 1033, - 1033, 900, 1033, 900, 901, 1033, 901, 1033, - 901, 900, 900, 1033, 900, 901, 1033, 1033, - 901, 1033, 901, 1033, 900, 1033, 901, 1033, - 900, 1033, 1033, 901, 1033, 901, 900, 1033, - 1033, 901, 901, 901, 901, 1033, 1033, 900, - 901, 1033, 900, 901, 901, 1033, 900, 1033, - 901, 1033, 901, 1033, 901, 1033, 900, 901, - 900, 1033, 1033, 901, 901, 1033, 901, 1033, - 900, 900, 900, 1033, 1033, 901, 1033, 901, - 1033, 900, 900, 1033, 901, 901, 1033, 901, - 1033, 900, 901, 1033, 901, 1033, 900, 901, - 901, 1033, 1033, 900, 901, 901, 901, 1033, - 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, - 900, 1033, 900, 1033, 900, 1145, 900, 1033, - 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, - 901, 901, 1033, 1033, 1033, 900, 900, 900, - 900, 1033, 1033, 900, 1033, 1033, 900, 900, - 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, - 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1155, 900, 901, 900, - 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, - 1033, 900, 900, 1033, 1033, 901, 900, 901, - 900, 3, 265, 3, 1, 1162, 3, 1, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, - 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, - 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, - 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, - 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, - 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, - 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, - 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, - 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, - 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, - 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 
1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, - 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, - 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, - 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, - 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, - 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, - 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, - 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, - 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, - 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, - 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, - 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, - 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, - 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, - 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, - 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, - 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, - 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, - 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, - 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, - 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, - 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, - 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, - 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, - 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, - 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, - 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, - 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, - 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, - 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, - 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, - 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, - 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, - 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, - 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, - 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, - 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, - 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, - 1337, 1338, 1339, 
1340, 1341, 1342, 1343, 1344, - 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, - 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, - 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, - 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, - 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, - 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, - 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, - 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, - 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, - 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, - 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, - 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, - 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, - 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, - 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, - 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, - 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, - 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, - 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, - 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, - 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, - 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, - 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, - 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, - 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, - 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, - 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, - 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, - 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, - 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, - 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, - 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, - 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, - 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, - 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, - 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, - 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, - 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, - 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, - 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, - 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, - 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, - 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, - 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, - 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, - 3, 1162, 3, 1, 601, 1, 1425, 1427, - 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, - 1430, 1431, 1432, 1427, 601, 1426, 890, 1, - 3, 610, 3, 1, 875, 875, 875, 877, - 1, 875, 875, 877, 875, 875, 877, 875, - 875, 875, 877, 875, 875, 877, 875, 875, - 877, 875, 875, 1, 877, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1428, 890, 1430, 
1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, - 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, - 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, - 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, - 1435, 1437, 1430, 1436, 1, 890, 875, 3, - 875, 877, 3, 877, 3, 1, 875, 1, - 265, 265, 1, 265, 1438, 1439, 601, 1, - 265, 3, 1, 3, 3, 265, 3, 1, - 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, - 601, 1, 266, 3, 1, 3, 266, 3, - 1, 1447, 601, 1, 3, 265, 3, 1, - 1448, 601, 1, 3, 265, 3, 1, 1449, - 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, - 1458, 1459, 601, 1, 3, 1460, 1, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, - 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, - 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, - 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, - 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, - 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, - 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, - 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, - 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, - 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, - 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, - 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, - 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, - 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, - 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, - 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, - 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, - 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, - 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, - 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, - 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, - 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, - 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, - 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, - 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, - 1461, 
1462, 1462, 1462, 1461, 1462, 1461, 1462, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, - 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, - 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, - 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, - 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, - 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, - 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, - 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, - 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, - 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, - 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, - 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, - 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, - 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, - 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, - 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, - 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, - 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, - 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, - 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, - 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, - 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, - 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, - 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, - 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, - 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, - 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, - 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, - 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, - 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, - 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, - 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, - 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, - 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, - 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, - 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, - 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, - 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, - 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, - 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, - 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, - 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, - 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, - 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, - 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, - 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, - 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1462, 
1462, 1589, 1589, 1462, 1461, 1589, 1589, - 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, - 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, - 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, - 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, - 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, - 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, - 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, - 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, - 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, - 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, - 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, - 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, - 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, - 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, - 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, - 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, - 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, - 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, - 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, - 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, - 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, - 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, - 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, - 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, - 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, - 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, - 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, - 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, - 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, - 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, - 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, - 1162, 3, 1, 3, 1162, 3, 1162, 3, - 1, 1162, 1162, 3, 1162, 3, 1162, 3, - 1162, 3, 1162, 3, 1, 3, 3, 1162, - 1162, 3, 1, 1162, 1162, 3, 1, 1162, - 3, 1162, 3, 1, 3, 1162, 3, 1162, - 3, 1, 1162, 3, 1162, 3, 1, 1162, - 3, 1, 1162, 1162, 3, 3, 1162, 3, - 1162, 3, 1162, 1, 1440, 1, 1726, 1440, - 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, - 1, 265, 3, 1, 3, 265, 1, 1, - 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, - 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, - 1729, 1, 1732, 1740, 1747, 1, 1731, 262, - 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, - 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, - 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, - 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, - 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, - 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, - 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, - 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, - 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, - 1799, 1800, 1801, 1803, 268, 530, 576, 1802, - 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, - 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, - 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, - 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, - 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, - 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, - 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 
1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, - 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, - 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, - 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, - 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, - 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, - 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, - 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, - 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, - 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, - 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, - 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, - 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, - 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, - 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, - 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, - 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, - 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, - 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, - 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, - 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, - 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, - 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, - 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, - 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, - 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, - 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, - 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, - 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, - 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, - 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, - 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, - 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, - 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, - 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, - 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, - 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, - 2021, 1982, -} - -var _graphclust_trans_targs []int16 = []int16{ - 1974, 0, 1974, 1975, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 68, 70, - 71, 72, 1976, 69, 74, 75, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 93, 94, 96, - 102, 125, 130, 132, 139, 143, 97, 98, - 99, 100, 101, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, - 124, 126, 127, 128, 129, 131, 133, 134, - 135, 136, 137, 138, 140, 141, 142, 144, - 291, 292, 1977, 158, 159, 160, 161, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 
210, 211, 212, - 213, 214, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 228, 229, 230, - 231, 232, 234, 235, 237, 243, 267, 271, - 273, 280, 284, 238, 239, 240, 241, 242, - 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 268, - 269, 270, 272, 274, 275, 276, 277, 278, - 279, 281, 282, 283, 285, 287, 288, 289, - 145, 290, 146, 294, 295, 296, 2, 297, - 3, 1974, 1978, 1974, 1979, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, - 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 342, 344, 345, 346, 347, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 358, - 359, 360, 361, 362, 363, 364, 366, 368, - 370, 371, 372, 1980, 369, 374, 375, 377, - 378, 379, 380, 381, 382, 383, 384, 385, - 386, 387, 388, 389, 390, 391, 393, 394, - 396, 402, 425, 430, 432, 439, 443, 397, - 398, 399, 400, 401, 403, 404, 405, 406, - 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 420, 421, 422, - 423, 424, 426, 427, 428, 429, 431, 433, - 434, 435, 436, 437, 438, 440, 441, 442, - 444, 591, 592, 1981, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 469, - 470, 471, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, - 486, 488, 489, 490, 491, 492, 493, 494, - 495, 496, 497, 498, 499, 500, 501, 502, - 503, 504, 505, 506, 507, 508, 510, 511, - 512, 513, 514, 516, 517, 519, 520, 521, - 522, 523, 524, 525, 526, 527, 528, 529, - 530, 531, 532, 534, 535, 537, 543, 567, - 571, 573, 580, 584, 538, 539, 540, 541, - 542, 544, 545, 546, 547, 548, 549, 550, - 551, 552, 553, 554, 555, 556, 557, 558, - 559, 560, 561, 562, 563, 564, 565, 566, - 568, 569, 570, 572, 574, 575, 576, 577, - 578, 579, 581, 582, 583, 585, 587, 588, - 589, 445, 590, 446, 594, 595, 596, 302, - 597, 303, 599, 605, 606, 608, 610, 613, - 616, 640, 1982, 622, 1983, 612, 1984, 615, - 618, 620, 621, 624, 625, 629, 630, 631, - 632, 633, 634, 635, 1985, 628, 639, 642, - 643, 644, 645, 646, 649, 650, 651, 652, - 653, 654, 655, 656, 660, 661, 663, 664, - 647, 666, 669, 671, 673, 667, 668, 670, - 672, 674, 678, 679, 680, 681, 682, 683, - 684, 685, 686, 687, 1986, 676, 677, 690, - 691, 299, 695, 696, 698, 997, 1000, 1003, - 1027, 1974, 1987, 1974, 1988, 712, 713, 714, - 715, 716, 717, 718, 719, 720, 721, 722, - 723, 724, 725, 726, 727, 728, 729, 730, - 731, 732, 733, 734, 735, 736, 737, 738, - 739, 741, 742, 743, 744, 745, 746, 747, - 748, 749, 750, 751, 752, 753, 754, 755, - 756, 757, 758, 759, 760, 761, 763, 765, - 767, 768, 769, 1989, 766, 771, 772, 774, - 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 785, 786, 787, 788, 790, 791, - 793, 799, 822, 827, 829, 836, 840, 794, - 795, 796, 797, 798, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 823, 824, 825, 826, 828, 830, - 831, 832, 833, 834, 835, 837, 838, 839, - 841, 988, 989, 1990, 855, 856, 857, 858, - 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, - 875, 876, 877, 878, 879, 880, 881, 882, - 883, 885, 886, 887, 888, 889, 890, 891, - 892, 893, 894, 895, 896, 897, 898, 899, - 900, 901, 902, 903, 904, 905, 907, 908, - 909, 910, 911, 913, 914, 916, 917, 918, - 919, 920, 921, 922, 923, 924, 925, 926, - 927, 928, 929, 931, 932, 934, 940, 964, - 968, 970, 977, 981, 935, 936, 937, 938, - 939, 941, 942, 943, 944, 945, 946, 947, - 948, 949, 950, 951, 952, 953, 954, 955, - 956, 957, 958, 959, 960, 961, 962, 963, - 965, 966, 967, 969, 971, 972, 973, 
974, - 975, 976, 978, 979, 980, 982, 984, 985, - 986, 842, 987, 843, 991, 992, 993, 699, - 994, 700, 1009, 1991, 999, 1992, 1002, 1005, - 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, - 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, - 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, - 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, - 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, - 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, - 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, - 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, - 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, - 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, - 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, - 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, - 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, - 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, - 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, - 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, - 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, - 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, - 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, - 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, - 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, - 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, - 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, - 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, - 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, - 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, - 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, - 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, - 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, - 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, - 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, - 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, - 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, - 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, - 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, - 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, - 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, - 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, - 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, - 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, - 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, - 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, - 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, - 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, - 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, - 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, - 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, - 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, - 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, - 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, - 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, - 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, - 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, - 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, - 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, - 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, - 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, - 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, - 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, - 1330, 1630, 
1631, 1632, 1633, 1634, 1635, 1636, - 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, - 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, - 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, - 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, - 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, - 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, - 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, - 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, - 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, - 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, - 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, - 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, - 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, - 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, - 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, - 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, - 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, - 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, - 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, - 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, - 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, - 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, - 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, - 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, - 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, - 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, - 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, - 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, - 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, - 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, - 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, - 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, - 1973, 1974, 1, 1975, 299, 300, 301, 692, - 693, 694, 697, 1028, 1628, 1629, 1638, 1639, - 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 14, 43, 65, 73, 76, 92, 298, 293, - 67, 95, 147, 148, 149, 150, 151, 152, - 153, 154, 155, 156, 157, 187, 209, 215, - 218, 233, 236, 286, 1974, 600, 601, 602, - 603, 604, 607, 641, 648, 657, 658, 659, - 662, 665, 688, 689, 304, 305, 306, 307, - 308, 309, 310, 311, 312, 313, 314, 343, - 365, 373, 376, 392, 598, 593, 367, 395, - 447, 448, 449, 450, 451, 452, 453, 454, - 455, 456, 457, 487, 509, 515, 518, 533, - 536, 586, 609, 623, 636, 637, 638, 611, - 619, 614, 617, 626, 627, 675, 1974, 701, - 702, 703, 704, 705, 706, 707, 708, 709, - 710, 711, 996, 762, 770, 1010, 1023, 1024, - 1025, 789, 995, 990, 740, 773, 764, 792, - 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 884, 906, 912, 915, 930, - 933, 983, 998, 1006, 1001, 1004, 1013, 1014, - 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, - 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, - 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, - 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, - 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, - 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, - 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, - 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, - 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, - 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, - 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, - 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, - 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, - 1866, 1872, 1875, 1890, 1893, 1943, -} - 
-var _graphclust_trans_actions []byte = []byte{ - 31, 0, 27, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 34, 40, 25, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 40, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 29, 51, 17, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 51, 0, 51, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 40, 21, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 40, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 23, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 43, 1, 47, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 15, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 13, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 9, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 7, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -} - -var _graphclust_to_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 
- 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 37, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_from_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_eof_trans []int16 = []int16{ - 0, 0, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 0, 0, 0, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 
268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 0, 0, 0, 0, - 0, 0, 610, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 610, 612, 612, - 610, 612, 612, 610, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 610, 612, - 612, 612, 612, 0, 0, 0, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 
901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 0, - 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1750, - 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, - 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, - 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, - 1983, 1983, 1983, 1983, -} - -const graphclust_start int = 1974 -const graphclust_first_final int = 1974 -const graphclust_error int = 0 - -const graphclust_en_main int = 1974 - - -// line 14 "grapheme_clusters.rl" - - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - -// line 4976 "grapheme_clusters.go" - { - cs = graphclust_start - ts = 0 - te = 0 - act = 0 - } - -// line 4984 "grapheme_clusters.go" - { - var _klen int - var _trans int - var _acts int - var _nacts uint - var _keys int - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } -_resume: - _acts = int(_graphclust_from_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts - 1] { - case 4: -// line 1 "NONE" - -ts = p - -// line 5008 "grapheme_clusters.go" - } - } - - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) - - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } - - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } - } - _keys += _klen - _trans += _klen - } - - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } - - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid + 1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } - } - _trans += _klen - } - -_match: - _trans = int(_graphclust_indicies[_trans]) -_eof_trans: - cs = int(_graphclust_trans_targs[_trans]) - - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } - - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: -// line 46 "grapheme_clusters.rl" - - - startPos = p - - case 1: -// line 50 "grapheme_clusters.rl" - - - endPos = p - - case 5: -// line 1 "NONE" - -te = p+1 - - case 6: -// line 54 "grapheme_clusters.rl" - -act = 3; - case 7: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 8: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 9: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 10: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 11: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 12: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 13: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 14: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 15: -// line 54 
"grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 16: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 17: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 18: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 19: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 20: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 21: -// line 1 "NONE" - - switch act { - case 0: - {cs = 0 -goto _again -} - case 3: - {p = (te) - 1 - - return endPos+1, data[startPos:endPos+1], nil - } - } - -// line 5218 "grapheme_clusters.go" - } - } - -_again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: -// line 1 "NONE" - -ts = 0 - - case 3: -// line 1 "NONE" - -act = 0 - -// line 5238 "grapheme_clusters.go" - } - } - - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: {} - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } - } - - _out: {} - } - -// line 116 "grapheme_clusters.rl" - - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl deleted file mode 100644 index 003ffbf59..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl +++ /dev/null @@ -1,132 +0,0 @@ -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT -%%{ - # (except you are actually in grapheme_clusters.rl here, so edit away!) - - machine graphclust; - write data; -}%% - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - %%{ - include GraphemeCluster "grapheme_clusters_table.rl"; - - action start { - startPos = p - } - - action end { - endPos = p - } - - action emit { - return endPos+1, data[startPos:endPos+1], nil - } - - ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; - AnyExtender = Extend | ZWJGlue | SpacingMark; - Extension = AnyExtender*; - ReplacementChar = (0xEF 0xBF 0xBD); - - CRLFSeq = CR LF; - ControlSeq = Control | ReplacementChar; - HangulSeq = ( - L+ (((LV? V+ | LVT) T*)?|LV?) | - LV V* T* | - V+ T* | - LVT T* | - T+ - ) Extension; - EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; - ZWJSeq = ZWJGlue Extension; - EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; - - UTF8Cont = 0x80 .. 0xBF; - AnyUTF8 = ( - 0x00..0x7F | - 0xC0..0xDF . UTF8Cont | - 0xE0..0xEF . UTF8Cont . UTF8Cont | - 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont - ); - - # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension - OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; - - # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break - PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; - - CRLFTok = CRLFSeq >start @end; - ControlTok = ControlSeq >start @end; - HangulTok = HangulSeq >start @end; - EmojiTok = EmojiSeq >start @end; - ZWJTok = ZWJSeq >start @end; - EmojiFlagTok = EmojiFlagSeq >start @end; - OtherTok = OtherSeq >start @end; - PrependTok = PrependSeq >start @end; - - main := |* - CRLFTok => emit; - ControlTok => emit; - HangulTok => emit; - EmojiTok => emit; - ZWJTok => emit; - EmojiFlagTok => emit; - PrependTok => emit; - OtherTok => emit; - - # any single valid UTF-8 character would also be valid per spec, - # but we'll handle that separately after the loop so we can deal - # with requesting more bytes if we're not at EOF. - *|; - - write init; - write exec; - }%% - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. 
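The split function above is shaped to drop straight into bufio.Scanner. A minimal consumer sketch, assuming the pre-module import path that this vendor tree is removing (github.com/apparentlymart/go-textseg/textseg) and an arbitrary sample string:

    package main

    import (
        "bufio"
        "fmt"
        "strings"

        "github.com/apparentlymart/go-textseg/textseg"
    )

    func main() {
        // ScanGraphemeClusters satisfies bufio.SplitFunc, so each Scan call
        // yields one grapheme cluster rather than one byte or one rune.
        sc := bufio.NewScanner(strings.NewReader("née 👍🏽"))
        sc.Split(textseg.ScanGraphemeClusters)
        for sc.Scan() {
            fmt.Printf("%q\n", sc.Text())
        }
        if err := sc.Err(); err != nil {
            fmt.Println("scan error:", err)
        }
    }

The emoji-plus-modifier sequence comes back as a single token because of the EmojiSeq rule above, which is the behavior byte- or rune-oriented splitting cannot give.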
- _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl deleted file mode 100644 index fb4511825..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl +++ /dev/null @@ -1,1583 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -# -# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine GraphemeCluster; - - Prepend = - 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... - | 0xDB 0x9D #Cf ARABIC END OF AYAH - | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK - | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH - | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH - | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN - | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... - ; - - CR = - 0x0D #Cc - ; - - LF = - 0x0A #Cc - ; - - Control = - 0x00..0x09 #Cc [10] .. - | 0x0B..0x0C #Cc [2] .. - | 0x0E..0x1F #Cc [18] .. - | 0x7F #Cc [33] .. - | 0xC2 0x80..0x9F # - | 0xC2 0xAD #Cf SOFT HYPHEN - | 0xD8 0x9C #Cf ARABIC LETTER MARK - | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR - | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE - | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... - | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR - | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR - | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... - | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS - | 0xE2 0x81 0xA5 #Cn - | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... - | 0xED 0xA0 0x80..0xFF #Cs [2048] .... - | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... - | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... - | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... - | 0xF3 0xA0 0x80 0x80 #Cn - | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG - | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. - | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. - | 0xF3 0xA0 0x83 0x00..0xBF # - | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. -# -# This script uses the unicode spec to generate a Ragel state machine -# that recognizes unicode alphanumeric characters. It generates 5 -# character classes: uupper, ulower, ualpha, udigit, and ualnum. -# Currently supported encodings are UTF-8 [default] and UCS-4. -# -# Usage: unicode2ragel.rb [options] -# -e, --encoding [ucs4 | utf8] Data encoding -# -h, --help Show this message -# -# This script was originally written as part of the Ferret search -# engine library. 
-# -# Author: Rakan El-Khalil - -require 'optparse' -require 'open-uri' - -ENCODINGS = [ :utf8, :ucs4 ] -ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } -DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" -DEFAULT_MACHINE_NAME= "WChar" - -### -# Display vars & default option - -TOTAL_WIDTH = 80 -RANGE_WIDTH = 23 -@encoding = :utf8 -@chart_url = DEFAULT_CHART_URL -machine_name = DEFAULT_MACHINE_NAME -properties = [] -@output = $stdout - -### -# Option parsing - -cli_opts = OptionParser.new do |opts| - opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| - @encoding = o.downcase.to_sym - end - opts.on("-h", "--help", "Show this message") do - puts opts - exit - end - opts.on("-u", "--url URL", "URL to process") do |o| - @chart_url = o - end - opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| - machine_name = o - end - opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| - properties = o - end - opts.on("-o", "--output FILE", "output file") do |o| - @output = File.new(o, "w+") - end -end - -cli_opts.parse(ARGV) -unless ENCODINGS.member? @encoding - puts "Invalid encoding: #{@encoding}" - puts cli_opts - exit -end - -## -# Downloads the document at url and yields every alpha line's hex -# range and description. - -def each_alpha( url, property ) - open( url ) do |file| - file.each_line do |line| - next if line =~ /^#/; - next if line !~ /; #{property} #/; - - range, description = line.split(/;/) - range.strip! - description.gsub!(/.*#/, '').strip! - - if range =~ /\.\./ - start, stop = range.split '..' - else start = stop = range - end - - yield start.hex .. stop.hex, description - end - end -end - -### -# Formats to hex at minimum width - -def to_hex( n ) - r = "%0X" % n - r = "0#{r}" unless (r.length % 2).zero? - r -end - -### -# UCS4 is just a straight hex conversion of the unicode codepoint. - -def to_ucs4( range ) - rangestr = "0x" + to_hex(range.begin) - rangestr << "..0x" + to_hex(range.end) if range.begin != range.end - [ rangestr ] -end - -## -# 0x00 - 0x7f -> 0zzzzzzz[7] -# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] -# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] -# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] - -UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] - -def to_utf8_enc( n ) - r = 0 - if n <= 0x7f - r = n - elsif n <= 0x7ff - y = 0xc0 | (n >> 6) - z = 0x80 | (n & 0x3f) - r = y << 8 | z - elsif n <= 0xffff - x = 0xe0 | (n >> 12) - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = x << 16 | y << 8 | z - elsif n <= 0x10ffff - w = 0xf0 | (n >> 18) - x = 0x80 | (n >> 12) & 0x3f - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = w << 24 | x << 16 | y << 8 | z - end - - to_hex(r) -end - -def from_utf8_enc( n ) - n = n.hex - r = 0 - if n <= 0x7f - r = n - elsif n <= 0xdfff - y = (n >> 8) & 0x1f - z = n & 0x3f - r = y << 6 | z - elsif n <= 0xefffff - x = (n >> 16) & 0x0f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = x << 10 | y << 6 | z - elsif n <= 0xf7ffffff - w = (n >> 24) & 0x07 - x = (n >> 16) & 0x3f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = w << 18 | x << 12 | y << 6 | z - end - r -end - -### -# Given a range, splits it up into ranges that can be continuously -# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] -# This is not strictly needed since the current [5.1] unicode standard -# doesn't have ranges that straddle utf8 boundaries. This is included -# for completeness as there is no telling if that will ever change. 
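The byte-layout table above is the arithmetic that to_utf8_enc packs into integers before emitting ranges. As a sanity check, here is a small Go sketch of just the two-byte case (encodeUTF8 is a hypothetical helper, not part of the generator):

    package main

    import "fmt"

    // encodeUTF8 mirrors the to_utf8_enc arithmetic for the two-byte case
    // only (U+0080..U+07FF): 110yyyyy 10zzzzzz.
    func encodeUTF8(n rune) []byte {
        return []byte{
            0xC0 | byte(n>>6),   // leading byte carries the high bits
            0x80 | byte(n&0x3F), // continuation byte carries the low 6 bits
        }
    }

    func main() {
        // U+00E9 (é) encodes to C3 A9, which is the kind of byte range the
        // generator emits for Latin-1 supplement characters.
        fmt.Printf("% X\n", encodeUTF8(0x00E9)) // C3 A9
    }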
- -def utf8_ranges( range ) - ranges = [] - UTF8_BOUNDARIES.each do |max| - if range.begin <= max - if range.end <= max - ranges << range - return ranges - end - - ranges << (range.begin .. max) - range = (max + 1) .. range.end - end - end - ranges -end - -def build_range( start, stop ) - size = start.size/2 - left = size - 1 - return [""] if size < 1 - - a = start[0..1] - b = stop[0..1] - - ### - # Shared prefix - - if a == b - return build_range(start[2..-1], stop[2..-1]).map do |elt| - "0x#{a} " + elt - end - end - - ### - # Unshared prefix, end of run - - return ["0x#{a}..0x#{b} "] if left.zero? - - ### - # Unshared prefix, not end of run - # Range can be 0x123456..0x56789A - # Which is equivalent to: - # 0x123456 .. 0x12FFFF - # 0x130000 .. 0x55FFFF - # 0x560000 .. 0x56789A - - ret = [] - ret << build_range(start, a + "FF" * left) - - ### - # Only generate middle range if need be. - - if a.hex+1 != b.hex - max = to_hex(b.hex - 1) - max = "FF" if b == "FF" - ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left - end - - ### - # Don't generate last range if it is covered by first range - - ret << build_range(b + "00" * left, stop) unless b == "FF" - ret.flatten! -end - -def to_utf8( range ) - utf8_ranges( range ).map do |r| - begin_enc = to_utf8_enc(r.begin) - end_enc = to_utf8_enc(r.end) - build_range begin_enc, end_enc - end.flatten! -end - -## -# Perform a 3-way comparison of the number of codepoints advertised by -# the unicode spec for the given range, the originally parsed range, -# and the resulting utf8 encoded range. - -def count_codepoints( code ) - code.split(' ').inject(1) do |acc, elt| - if elt =~ /0x(.+)\.\.0x(.+)/ - if @encoding == :utf8 - acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) - else - acc * ($2.hex - $1.hex + 1) - end - else - acc - end - end -end - -def is_valid?( range, desc, codes ) - spec_count = 1 - spec_count = $1.to_i if desc =~ /\[(\d+)\]/ - range_count = range.end - range.begin + 1 - - sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } - sum == spec_count and sum == range_count -end - -## -# Generate the state maching to stdout - -def generate_machine( name, property ) - pipe = " " - @output.puts " #{name} = " - each_alpha( @chart_url, property ) do |range, desc| - - codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) - - #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless - # is_valid? range, desc, codes - - range_width = codes.map { |a| a.size }.max - range_width = RANGE_WIDTH if range_width < RANGE_WIDTH - - desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 - desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH - - if desc.size > desc_width - desc = desc[0..desc_width - 4] + "..." - end - - codes.each_with_index do |r, idx| - desc = "" unless idx.zero? 
- code = "%-#{range_width}s" % r - @output.puts " #{pipe} #{code} ##{desc}" - pipe = "|" - end - end - @output.puts " ;" - @output.puts "" -end - -@output.puts < 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + 
return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. 
+func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. + const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." 
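A short sketch of how the Marshaler options and entry points above are used; durationpb is chosen only to exercise the well-known-type path, and the exact output naturally depends on the message being marshaled:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/golang/protobuf/jsonpb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        m := &jsonpb.Marshaler{
            Indent:       "  ", // pretty-print; empty string means compact JSON
            EmitDefaults: true, // also render zero-valued fields
        }

        // Well-known types take the special proto3 JSON mapping handled in
        // marshalMessage: a Duration renders as a quoted seconds string,
        // e.g. "90.500s" for 90.5 seconds.
        s, err := m.MarshalToString(durationpb.New(90500 * time.Millisecond))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(s)
    }

Indent and EmitDefaults only become visible on message-valued fields; for a bare well-known scalar such as this Duration the output is the quoted string alone.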
+ s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
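marshalAny above consults the optional AnyResolver before falling back to the global registry. A hedged sketch of a caller-supplied resolver follows; onlyDuration is an illustrative name and the resolver is deliberately minimal:

    package main

    import (
        "fmt"
        "log"
        "path"
        "time"

        "github.com/golang/protobuf/jsonpb"
        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    // onlyDuration resolves the google.protobuf.Duration type URL and
    // rejects everything else.
    type onlyDuration struct{}

    func (onlyDuration) Resolve(typeURL string) (proto.Message, error) {
        if path.Base(typeURL) == "google.protobuf.Duration" {
            return &durationpb.Duration{}, nil
        }
        return nil, fmt.Errorf("unresolvable type URL %q", typeURL)
    }

    func main() {
        a, err := anypb.New(durationpb.New(2 * time.Second))
        if err != nil {
            log.Fatal(err)
        }
        m := &jsonpb.Marshaler{AnyResolver: onlyDuration{}}
        out, err := m.MarshalToString(a)
        if err != nil {
            log.Fatal(err)
        }
        // Expected shape: {"@type":"type.googleapis.com/google.protobuf.Duration","value":"2s"}
        fmt.Println(out)
    }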
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff6420..066b4323b 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. 
var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index 63dc05785..000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto - -package descriptor - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/descriptor.proto. - -type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type - -const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE -const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT -const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 -const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 -const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 -const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 -const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 -const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL -const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING -const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP -const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE -const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES -const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 -const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM -const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 -const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 -const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 -const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 - -var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name -var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value - -type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label - -const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL -const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED -const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED - -var 
FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name -var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value - -type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode - -const FileOptions_SPEED = descriptorpb.FileOptions_SPEED -const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE -const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME - -var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name -var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value - -type FieldOptions_CType = descriptorpb.FieldOptions_CType - -const FieldOptions_STRING = descriptorpb.FieldOptions_STRING -const FieldOptions_CORD = descriptorpb.FieldOptions_CORD -const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE - -var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name -var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value - -type FieldOptions_JSType = descriptorpb.FieldOptions_JSType - -const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL -const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING -const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER - -var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name -var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value - -type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel - -const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN -const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS -const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT - -var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name -var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value - -type FileDescriptorSet = descriptorpb.FileDescriptorSet -type FileDescriptorProto = descriptorpb.FileDescriptorProto -type DescriptorProto = descriptorpb.DescriptorProto -type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions -type FieldDescriptorProto = descriptorpb.FieldDescriptorProto -type OneofDescriptorProto = descriptorpb.OneofDescriptorProto -type EnumDescriptorProto = descriptorpb.EnumDescriptorProto -type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto -type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto -type MethodDescriptorProto = descriptorpb.MethodDescriptorProto - -const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming -const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming - -type FileOptions = descriptorpb.FileOptions - -const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles -const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 -const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor -const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices -const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices -const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices -const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices -const 
Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated -const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas - -type MessageOptions = descriptorpb.MessageOptions - -const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat -const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor -const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated - -type FieldOptions = descriptorpb.FieldOptions - -const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype -const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype -const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy -const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated -const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak - -type OneofOptions = descriptorpb.OneofOptions -type EnumOptions = descriptorpb.EnumOptions - -const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated - -type EnumValueOptions = descriptorpb.EnumValueOptions - -const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated - -type ServiceOptions = descriptorpb.ServiceOptions - -const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated - -type MethodOptions = descriptorpb.MethodOptions - -const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated -const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel - -type UninterpretedOption = descriptorpb.UninterpretedOption -type SourceCodeInfo = descriptorpb.SourceCodeInfo -type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo -type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange -type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange -type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange -type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart -type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location -type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation - -var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, - 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, 
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x32, -} - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } -func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { - if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff1..85f9f5736 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. 
func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c6..d3c33259d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a4..b2b55dd85 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d491..8368a3f70 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index bcfa19520..52ccb5a93 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -8,8 +8,11 @@ # Please keep the list sorted. +Amazon.com, Inc Damian Gryski +Eric Buth Google Inc. Jan Mercl <0xjnml@gmail.com> +Klaus Post Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index 931ae3160..ea6524ddd 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -26,9 +26,13 @@ # Please keep the list sorted. +Alex Legg Damian Gryski +Eric Buth Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney Kai Backman +Klaus Post Marc-Antoine Ruel Nigel Tao Rob Pike diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index 72efb0353..23c6e26c6 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error @@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). 
if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. 
-// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_arm64.s similarity index 67% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.s rename to vendor/github.com/golang/snappy/decode_arm64.s index 1c66e3723..7a3ead17e 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -1,4 +1,4 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -15,12 +15,12 @@ // // All local variables fit into registers. The non-zero stack size is only to // spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] // + R8 dst_base // + R9 dst_len // + R10 dst_base + dst_len @@ -33,34 +33,35 @@ // The registers R8-R13 (marked with a "+") are set at the start of the // function, and after a CALL returns, and are not otherwise modified. // -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 loop: // for s < len(src) - CMPQ SI, R13 - JEQ end + CMP R13, R6 + BEQ end - // CX = uint32(src[s]) + // R4 = uint32(src[s]) // // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy // ---------------------------------------- // The code below handles literal tags. @@ -68,35 +69,36 @@ loop: // case tagLiteral: // x := uint32(src[s] >> 2) // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus // case x < 60: // s++ - INCQ SI + ADD $1, R6, R6 doLit: // This is the end of the inner "switch", when we have a literal tag. // - // We assume that CX == x and x fits in a uint32, where x is the variable + // We assume that R4 == x and x fits in a uint32, where x is the variable // used in the pure Go decode_other.go code. // length = int(x) + 1 // // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 // Prepare to check if copying length bytes will run past the end of dst or // src. 
// - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 // !!! Try a faster technique for short (16 or fewer bytes) copies. // @@ -109,12 +111,12 @@ doLit: // is contiguous in memory and so it needs to leave enough source bytes to // read the next tag without refilling buffers, but Go's Decode assumes // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove // !!! Implement the copy from src to dst as a 16-byte load and store. // (Decode's documentation says that dst and src must not overlap.) @@ -124,57 +126,57 @@ doLit: // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a // non-nil error), so the overrun will be ignored. // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop callMemmove: // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt // copy(dst[d:], src[s:s+length]) // // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) CALL runtime·memmove(SB) // Restore local variables: unspill registers from the stack and // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop tagLit60Plus: // !!! This fragment does the @@ -182,142 +184,150 @@ tagLit60Plus: // s += x - 58; if uint(s) > uint(len(src)) { etc } // // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - CMPQ SI, R13 - JA errCorrupt + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit + MOVBU -1(R6), R4 + B doLit tagLit61: // case x == 61: // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit + MOVHU -2(R6), R4 + B doLit tagLit62Plus: - CMPL CX, $62 - JA tagLit63 + CMPW $62, R4 + BHI tagLit63 // case x == 62: // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit tagLit63: // case x == 63: // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit + MOVWU -4(R6), R4 + B doLit -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. tagCopy4: // case tagCopy4: // s += 5 - ADDQ $5, SI + ADD $5, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX + MOVD $1, R1 + ADD R4>>2, R1, R4 // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy + MOVWU -4(R6), R5 + B doCopy tagCopy2: // case tagCopy2: // s += 3 - ADDQ $3, SI + ADD $3, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX + MOVD $1, R1 + ADD R4>>2, R1, R4 // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy + MOVHU -2(R6), R5 + B doCopy tagCopy: // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 // case tagCopy1: // s += 2 - ADDQ $2, SI + ADD $2, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 doCopy: // This is the end of the outer "switch", when we have a copy tag. 
// // We assume that: - // - CX == length && CX > 0 - // - DX == offset + // - R4 == length && R4 > 0 + // - R5 == offset // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length // // Set: // - R14 = len(dst)-d // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 // !!! Try a faster technique for short (16 or fewer bytes) forward copies. // @@ -332,18 +342,18 @@ doCopy: // } // copy 16 bytes // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop slowForwardCopy: // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we @@ -394,9 +404,9 @@ slowForwardCopy: // if length > len(dst)-d-10 { // goto verySlowForwardCopy // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy + SUB $10, R14, R14 + CMP R14, R4 + BGT verySlowForwardCopy makeOffsetAtLeast8: // !!! As above, expand the pattern so that offset >= 8 and we can use @@ -410,36 +420,37 @@ makeOffsetAtLeast8: // // The two previous lines together means that d-offset, and therefore // // R15, is unchanged. // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 + CMP $8, R5 + BGE fixUpSlowForwardCopy + MOVD (R15), R3 + MOVD R3, (R7) + SUB R5, R4, R4 + ADD R5, R7, R7 + ADD R5, R5, R5 + B makeOffsetAtLeast8 fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being + // !!! Add length (which might be negative now) to d (implied by R7 being // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if + // top of the loop. Before we do that, though, we save R7 to R2 so that, if // length is positive, copying the remaining length bytes will write to the // right place. - MOVQ DI, AX - ADDQ CX, DI + MOVD R7, R2 + ADD R4, R7, R7 finishSlowForwardCopy: // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative // length means that we overrun, but as above, that will be fixed up by // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy + MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy verySlowForwardCopy: // verySlowForwardCopy is a simple implementation of forward copy. In C @@ -454,29 +465,30 @@ verySlowForwardCopy: // break // } // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. 
-// ---------------------------------------- + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + CBNZ R4, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- end: // This is the end of the "for s < len(src)". // // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt + CMP R10, R7 + BNE errCorrupt // return 0 - MOVQ $0, ret+48(FP) + MOVD $0, ret+48(FP) RET errCorrupt: // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) + MOVD $1, R2 + MOVD R2, ret+48(FP) RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_asm.go similarity index 93% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.go rename to vendor/github.com/golang/snappy/decode_asm.go index fcd192b84..7082b3491 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go index 8c9f2049b..2f672be55 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy @@ -85,14 +85,28 @@ func decode(dst, src []byte) int { if offset <= 0 || d < offset || length > len(dst)-d { return decodeErrCodeCorrupt } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs // forwards, even if the slices overlap. Conceptually, this is: // // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index 8d393e904..7f2365707 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. 
type Writer struct { w io.Writer err error diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 000000000..f8d54adfc --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. 
+ SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. 
+TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_asm.go similarity index 97% rename from vendor/github.com/golang/snappy/encode_amd64.go rename to vendor/github.com/golang/snappy/encode_asm.go index 150d91bc8..107c1e714 100644 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go index dbcae905e..296d7f0be 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..fd2b3a42b 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -36,11 +36,12 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // @@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). 
- if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. 
-const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afd..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d4..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318..c71003463 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -178,7 +178,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..1ef65ac1d 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..287b89358 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -207,10 +207,11 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, // Check whether this is a []byte of text data. if t.Elem() == reflect.TypeOf(byte(0)) { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) + skipType = true + return opts.FormatType(t, out) } } @@ -281,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -292,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. - skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85b..68b5c1ae1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18..44e368e56 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. 
+// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 5d56f4b59..21a17c5af 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -17,11 +17,8 @@ JSON output mode for production. ## Stability Note -While this library is fully open source and HashiCorp will be maintaining it -(since we are and will be making extensive use of it), the API and output -format is subject to minor changes as we fully bake and vet it in our projects. -This notice will be removed once it's fully integrated into our major projects -and no further changes are anticipated. +This library has reached 1.0 stability. Its API can be considered solidified +and promised through future versions. ## Installation and Docs @@ -102,7 +99,7 @@ into all the callers. ### Using `hclog.Fmt()` ```go -var int totalBandwidth = 200 +totalBandwidth := 200 appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) ``` @@ -146,3 +143,6 @@ log.Printf("[DEBUG] %d", 42) Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. + +If the log lines start with a timestamp you can use the +`InferLevelsWithTimestamp` option to try and ignore them. 
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 44aa9bf2c..9635c838b 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package hclog @@ -21,6 +22,7 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) isTerm := isUnixTerm || isCygwinTerm if !isTerm { + l.headerColor = ColorOff l.writer.color = ColorOff } } diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 23486b6d7..30859168e 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package hclog @@ -26,8 +27,12 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isTerm := isUnixTerm || isCygwinTerm if !isTerm { l.writer.color = ColorOff + l.headerColor = ColorOff return } - l.writer.w = colorable.NewColorable(fi) + + if l.headerColor == ColorOff { + l.writer.w = colorable.NewColorable(fi) + } } } diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 22ebc57d8..b9f00217c 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -2,6 +2,7 @@ package hclog import ( "sync" + "time" ) var ( @@ -14,6 +15,7 @@ var ( DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, + TimeFn: time.Now, } ) diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod index b6698c083..3d02298c8 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,11 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.7.0 - github.com/mattn/go-colorable v0.1.4 - github.com/mattn/go-isatty v0.0.10 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 + github.com/fatih/color v1.13.0 + github.com/mattn/go-colorable v0.1.12 + github.com/mattn/go-isatty v0.0.14 + github.com/stretchr/testify v1.7.2 + golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect ) go 1.13 diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum index 3a656dfd9..a8dadb218 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,18 +1,26 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 
h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 631baf2f0..ff42f1bfc 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -180,9 +180,10 @@ func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) i func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { return &stdlogAdapter{ - log: i, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: i, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + 
forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 6099e6726..83232f7a6 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -60,6 +60,7 @@ type intLogger struct { callerOffset int name string timeFormat string + timeFn TimeFunction disableTime bool // This is an interface so that it's shared by any derived loggers, since @@ -68,6 +69,8 @@ type intLogger struct { writer *writer level *int32 + headerColor ColorOption + implied []interface{} exclude func(level Level, msg string, args ...interface{}) bool @@ -112,16 +115,28 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex = new(sync.Mutex) } + var primaryColor, headerColor ColorOption + + if opts.ColorHeaderOnly { + primaryColor = ColorOff + headerColor = opts.Color + } else { + primaryColor = opts.Color + headerColor = ColorOff + } + l := &intLogger{ json: opts.JSONFormat, name: opts.Name, timeFormat: TimeFormat, + timeFn: time.Now, disableTime: opts.DisableTime, mutex: mutex, - writer: newWriter(output, opts.Color), + writer: newWriter(output, primaryColor), level: new(int32), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + headerColor: headerColor, } if opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -130,6 +145,9 @@ func newLogger(opts *LoggerOptions) *intLogger { if l.json { l.timeFormat = TimeFormatJSON } + if opts.TimeFn != nil { + l.timeFn = opts.TimeFn + } if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -152,7 +170,7 @@ func (l *intLogger) log(name string, level Level, msg string, args ...interface{ return } - t := time.Now() + t := l.timeFn() l.mutex.Lock() defer l.mutex.Unlock() @@ -199,6 +217,24 @@ func trimCallerPath(path string) string { return path[idx+1:] } +// isNormal indicates if the rune is one allowed to exist as an unquoted +// string value. This is a subset of ASCII, `-` through `~`. +func isNormal(r rune) bool { + return 0x2D <= r && r <= 0x7E // - through ~ +} + +// needsQuoting returns false if all the runes in string are normal, according +// to isNormal +func needsQuoting(str string) bool { + for _, r := range str { + if !isNormal(r) { + return true + } + } + + return false +} + // Non-JSON logging format function func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { @@ -209,7 +245,12 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, s, ok := _levelToBracket[level] if ok { - l.writer.WriteString(s) + if l.headerColor != ColorOff { + color := _levelToColor[level] + color.Fprint(l.writer, s) + } else { + l.writer.WriteString(s) + } } else { l.writer.WriteString("[?????]") } @@ -263,6 +304,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, val = st if st == "" { val = `""` + raw = true } case int: val = strconv.FormatInt(int64(st), 10) @@ -295,6 +337,9 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, continue FOR case Format: val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ case Quote: + raw = true + val = strconv.Quote(string(st)) default: v := reflect.ValueOf(st) if v.Kind() == reflect.Slice { @@ -320,13 +365,11 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteString("=\n") writeIndent(l.writer, val, " | ") l.writer.WriteString(" ") - } else if !raw && strings.ContainsAny(val, " \t") { + } else if !raw && needsQuoting(val) { l.writer.WriteByte(' ') l.writer.WriteString(key) l.writer.WriteByte('=') - l.writer.WriteByte('"') - l.writer.WriteString(val) - l.writer.WriteByte('"') + l.writer.WriteString(strconv.Quote(val)) } else { l.writer.WriteByte(' ') l.writer.WriteString(key) @@ -684,9 +727,10 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { newLog.callerOffset = l.callerOffset + 4 } return &stdlogAdapter{ - log: &newLog, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: &newLog, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 7f36b1fd2..858143028 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -5,6 +5,7 @@ import ( "log" "os" "strings" + "time" ) var ( @@ -67,6 +68,12 @@ type Octal int // text output. For example: L.Info("bits", Binary(17)) type Binary int +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + // ColorOption expresses how the output should be colored, if at all. type ColorOption uint8 @@ -206,6 +213,15 @@ type StandardLoggerOptions struct { // [DEBUG] and strip it off before reapplying it. InferLevels bool + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them while ignoring possible + // timestamp values in the beginning of the string. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + // The timestamp detection may result in false positives and incomplete + // string outputs. + InferLevelsWithTimestamp bool + // ForceLevel is used to force all output from the standard logger to be at // the specified level. Similar to InferLevels, this will strip any level // prefix contained in the logged string before applying the forced level. @@ -213,6 +229,8 @@ type StandardLoggerOptions struct { ForceLevel Level } +type TimeFunction = func() time.Time + // LoggerOptions can be used to configure a new logger. type LoggerOptions struct { // Name of the subsystem to prefix logs with @@ -242,6 +260,9 @@ type LoggerOptions struct { // The time format to use instead of the default TimeFormat string + // A function which is called to get the time object that is formatted using `TimeFormat` + TimeFn TimeFunction + // Control whether or not to display the time at all. This is required // because setting TimeFormat to empty assumes the default format. DisableTime bool @@ -250,6 +271,9 @@ type LoggerOptions struct { // are concretely instances of *os.File. Color ColorOption + // Only color the header, not the body. This can help with readability of long messages. 
+ ColorHeaderOnly bool + // A function which is called with the log information and if it returns true the value // should not be logged. // This is useful when interacting with a system that you wish to suppress the log diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index f35d875d3..641f20ccb 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -3,16 +3,22 @@ package hclog import ( "bytes" "log" + "regexp" "strings" ) +// Regex to ignore characters commonly found in timestamp formats from the +// beginning of inputs. +var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`) + // Provides a io.Writer to shim the data out of *log.Logger // and back into our Logger. This is basically the only way to // build upon *log.Logger. type stdlogAdapter struct { - log Logger - inferLevels bool - forceLevel Level + log Logger + inferLevels bool + inferLevelsWithTimestamp bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through @@ -28,6 +34,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { // Log at the forced level s.dispatch(str, s.forceLevel) } else if s.inferLevels { + if s.inferLevelsWithTimestamp { + str = s.trimTimestamp(str) + } + level, str := s.pickLevel(str) s.dispatch(str, level) } else { @@ -64,7 +74,7 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { case strings.HasPrefix(str, "[INFO]"): return Info, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[WARN]"): - return Warn, strings.TrimSpace(str[7:]) + return Warn, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[ERROR]"): return Error, strings.TrimSpace(str[7:]) case strings.HasPrefix(str, "[ERR]"): @@ -74,6 +84,11 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { } } +func (s *stdlogAdapter) trimTimestamp(str string) string { + idx := logTimestampRegexp.FindStringIndex(str) + return str[idx[1]:] +} + type logWriter struct { l *log.Logger } diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 46ee09fc0..39391f24f 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -3,8 +3,9 @@ `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), and +[Boundary](https://www.boundaryproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 6b42975b3..e0bee88a1 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -22,7 +22,8 @@ import ( "sync/atomic" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" ) // If this is 1, then we've called CleanupClients. 
This can be used @@ -203,6 +204,11 @@ type ClientConfig struct { // // You cannot Reattach to a server with this option enabled. AutoMTLS bool + + // GRPCDialOptions allows plugin users to pass custom grpc.DialOption + // to create gRPC connections. This only affects plugins using the gRPC + // protocol. + GRPCDialOptions []grpc.DialOption } // ReattachConfig is used to configure a client to reattach to an @@ -568,6 +574,8 @@ func (c *Client) Start() (addr net.Addr, err error) { c.config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, ServerName: "localhost", } } @@ -623,17 +631,19 @@ func (c *Client) Start() (addr net.Addr, err error) { // Wait for the command to end. err := cmd.Wait() - debugMsgArgs := []interface{}{ + msgArgs := []interface{}{ "path", path, "pid", pid, } if err != nil { - debugMsgArgs = append(debugMsgArgs, + msgArgs = append(msgArgs, []interface{}{"error", err.Error()}...) + c.logger.Error("plugin process exited", msgArgs...) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", msgArgs...) } - // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", debugMsgArgs...) os.Stderr.Sync() // Set that we exited, which takes a lock @@ -768,7 +778,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } // loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA for the client TLSConfig. +// server, and load it as the RootCA and ClientCA for the client TLSConfig. func (c *Client) loadServerCert(cert string) error { certPool := x509.NewCertPool() @@ -785,6 +795,7 @@ func (c *Client) loadServerCert(cert string) error { certPool.AddCert(x509Cert) c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod index 4e182e625..f28852215 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/go-plugin -go 1.13 +go 1.17 require ( github.com/golang/protobuf v1.3.4 @@ -9,7 +9,16 @@ require ( github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 github.com/oklog/run v1.0.0 - github.com/stretchr/testify v1.3.0 // indirect golang.org/x/net v0.0.0-20190311183353-d8887717615a google.golang.org/grpc v1.27.1 ) + +require ( + github.com/fatih/color v1.7.0 // indirect + github.com/mattn/go-colorable v0.1.4 // indirect + github.com/mattn/go-isatty v0.0.10 // indirect + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/sys v0.0.0-20191008105621-543471e840be // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum index 56062044e..93d2905cd 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -13,7 +12,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -50,7 +48,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= @@ -60,7 +57,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= @@ -74,7 +70,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= 
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 978121913..842903c92 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -14,9 +14,9 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" ) -func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { // Build dialing options. - opts := make([]grpc.DialOption, 0, 5) + opts := make([]grpc.DialOption, 0) // We use a custom dialer so that we can connect over unix domain sockets. opts = append(opts, grpc.WithDialer(dialer)) @@ -37,6 +37,9 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + // Add our custom options if we have any + opts = append(opts, dialOpts...) + // Connect. Note the first parameter is unused because we use a custom // dialer that has the state to see the address. conn, err := grpc.Dial("unused", opts...) @@ -50,7 +53,7 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, // newGRPCClient creates a new GRPCClient. The Client argument is expected // to be successfully started already with a lock held. func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { - conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) 
if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go index 70ba546bf..185957f8d 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go index 9f7b01809..0eaa7705d 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_windows.go +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -19,6 +19,7 @@ func _pidAlive(pid int) bool { if err != nil { return false } + defer syscall.CloseHandle(h) var ec uint32 if e := syscall.GetExitCodeProcess(h, &ec); e != nil { diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 5bb18dd5d..449ba6cc1 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -45,7 +45,11 @@ func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) return } diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 7a58cc391..e13499910 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -304,13 +304,13 @@ func Serve(opts *ServeConfig) { certPEM, keyPEM, err := generateCert() if err != nil { - logger.Error("failed to generate client certificate", "error", err) + logger.Error("failed to generate server certificate", "error", err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - logger.Error("failed to parse client certificate", "error", err) + logger.Error("failed to parse server certificate", "error", err) panic(err) } @@ -319,6 +319,8 @@ func Serve(opts *ServeConfig) { ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCertPool, MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", } // We send back the raw leaf cert data for the client rather than the diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE index e87a115e4..a320b309c 100644 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -1,3 +1,5 @@ +Copyright © 2015-2022 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md index dbae7f7be..094550020 100644 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -1,3 +1,10 @@ +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + # 1.3.0 (March 31, 2021) Please note that CHANGELOG.md does not exist in the source code prior to this release. 
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 851a337be..4d2505090 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) [![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index d05575961..1d8809028 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" "regexp" + "sort" "strings" ) @@ -11,30 +12,40 @@ import ( // ">= 1.0". type Constraint struct { f constraintFunc + op operator check *Version original string } +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + // Constraints is a slice of constraints. We make a custom type so that // we can add methods to it. type Constraints []*Constraint type constraintFunc func(v, c *Version) bool -var constraintOperators map[string]constraintFunc +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} var constraintRegexp *regexp.Regexp func init() { - constraintOperators = map[string]constraintFunc{ - "": constraintEqual, - "=": constraintEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "~>": constraintPessimistic, + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, } ops := make([]string, 0, len(constraintOperators)) @@ -66,6 +77,16 @@ func NewConstraint(v string) (Constraints, error) { return Constraints(result), nil } +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + // Check tests if a version satisfies all the constraints. func (cs Constraints) Check(v *Version) bool { for _, c := range cs { @@ -77,6 +98,56 @@ func (cs Constraints) Check(v *Version) bool { return true } +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comaparison. 
+func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + // Returns the string format of the constraints func (cs Constraints) String() string { csStr := make([]string, len(cs)) @@ -107,8 +178,11 @@ func parseSingle(v string) (*Constraint, error) { return nil, err } + cop := constraintOperators[matches[1]] + return &Constraint{ - f: constraintOperators[matches[1]], + f: cop.f, + op: cop.op, check: check, original: v, }, nil @@ -138,6 +212,18 @@ func prereleaseCheck(v, c *Version) bool { // Constraint functions //------------------------------------------------------------------- +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + func constraintEqual(v, c *Version) bool { return v.Equal(c) } diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 8068834ec..e87df6990 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -64,7 +64,6 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { } segmentsStr := strings.Split(matches[1], ".") segments := make([]int64, len(segmentsStr)) - si := 0 for i, str := range segmentsStr { val, err := strconv.ParseInt(str, 10, 64) if err != nil { @@ -72,8 +71,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { "Error parsing version: %s", err) } - segments[i] = int64(val) - si++ + segments[i] = val } // Even though we could support more than three segments, if we @@ -92,7 +90,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { metadata: matches[10], pre: pre, segments: segments, - si: si, + si: len(segmentsStr), original: v, }, nil } @@ -390,3 +388,20 @@ func (v *Version) String() string { func (v *Version) Original() string { return v.original } + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. 
+func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/hc-install/go.mod b/vendor/github.com/hashicorp/hc-install/go.mod index a460fcc66..82beffe1c 100644 --- a/vendor/github.com/hashicorp/hc-install/go.mod +++ b/vendor/github.com/hashicorp/hc-install/go.mod @@ -4,10 +4,10 @@ go 1.16 require ( github.com/go-git/go-git/v5 v5.4.2 - github.com/google/go-cmp v0.5.6 + github.com/google/go-cmp v0.5.8 github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-version v1.3.0 + github.com/hashicorp/go-version v1.4.0 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e ) diff --git a/vendor/github.com/hashicorp/hc-install/go.sum b/vendor/github.com/hashicorp/hc-install/go.sum index 276292194..e2b49f815 100644 --- a/vendor/github.com/hashicorp/hc-install/go.sum +++ b/vendor/github.com/hashicorp/hc-install/go.sum @@ -28,8 +28,8 @@ github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6 github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= @@ -41,8 +41,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -102,8 +102,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go index 4936dda2f..2b85ea344 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -121,12 +121,23 @@ func fileMapFromChecksums(checksums strings.Builder) (ChecksumFileMap, error) { return csMap, nil } -func compareChecksum(logger *log.Logger, r io.Reader, verifiedHashSum HashSum) error { +func compareChecksum(logger *log.Logger, r io.Reader, verifiedHashSum HashSum, filename string, expectedSize int64) error { h := sha256.New() - _, err := io.Copy(h, r) + + // This may take a while depending on network connection as the io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. + logger.Printf("copying %q (%d bytes) to calculate checksum", filename, expectedSize) + bytesCopied, err := io.Copy(h, r) if err != nil { return err } + logger.Printf("copied %d bytes of %q", bytesCopied, filename) + + if expectedSize != 0 && bytesCopied != int64(expectedSize) { + return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", + bytesCopied, expectedSize) + } calculatedSum := h.Sum(nil) if !bytes.Equal(calculatedSum, verifiedHashSum) { diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go index 62ed6afd0..e9cd94e43 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -12,7 +12,6 @@ import ( "os" "path/filepath" "runtime" - "strconv" "github.com/hashicorp/hc-install/internal/httpclient" ) @@ -95,14 +94,16 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, contentType, zipMimeTypes) } + expectedSize := resp.ContentLength + if d.VerifyChecksum { - d.Logger.Printf("calculating checksum of %q", pb.Filename) + d.Logger.Printf("verifying checksum of %q", pb.Filename) // provide extra reader to calculate & compare checksum var buf bytes.Buffer r := io.TeeReader(resp.Body, &buf) pkgReader = &buf - err := compareChecksum(d.Logger, r, verifiedChecksum) + err := compareChecksum(d.Logger, r, verifiedChecksum, pb.Filename, expectedSize) if err != nil { return err } @@ -114,21 +115,17 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } defer pkgFile.Close() - d.Logger.Printf("copying downloaded file to %s", pkgFile.Name()) + d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) + // Unless the bytes were already downloaded above for checksum verification + // this may take a while depending on network connection as the io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. 
bytesCopied, err := io.Copy(pkgFile, pkgReader) if err != nil { return err } d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) - expectedSize := 0 - if length := resp.Header.Get("content-length"); length != "" { - var err error - expectedSize, err = strconv.Atoi(length) - if err != nil { - return err - } - } if expectedSize != 0 && bytesCopied != int64(expectedSize) { return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", bytesCopied, expectedSize) diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go index 24e32ffc4..849f16a52 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -79,7 +79,7 @@ func (r *Releases) ListProductVersions(ctx context.Context, productName string) } contentType := resp.Header.Get("content-type") - if contentType != "application/json" { + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) } @@ -144,7 +144,7 @@ func (r *Releases) GetProductVersion(ctx context.Context, product string, versio } contentType := resp.Header.Get("content-type") - if contentType != "application/json" { + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) } diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go index b619d89ed..aeeac9469 100644 --- a/vendor/github.com/hashicorp/hc-install/product/consul.go +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -15,9 +15,7 @@ import ( var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe) var ( - v1_16 = version.Must(version.NewVersion("1.16")) - // TODO: version.MustConstraint() ? - v1_16c, _ = version.NewConstraint("1.16") + v1_18 = version.Must(version.NewVersion("1.18")) ) var Consul = Product{ @@ -52,6 +50,6 @@ var Consul = Product{ BuildInstructions: &BuildInstructions{ GitRepoURL: "https://github.com/hashicorp/consul.git", PreCloneCheck: &build.GoIsInstalled{}, - Build: &build.GoBuild{Version: v1_16}, + Build: &build.GoBuild{Version: v1_18}, }, } diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index 4c644fcfb..aa6faaf2f 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,150 @@ # HCL Changelog +## v2.12.0 (April 22, 2022) + +### Enhancements + +* hclsyntax: Evaluation of conditional expressions will now produce more precise error messages about inconsistencies between the types of the true and false result expressions, particularly in cases where both are of the same structural type kind but differ in their nested elements. ([#530](https://github.com/hashicorp/hcl/pull/530)) +* hclsyntax: The lexer will no longer allocate a small object on the heap for each token. Instead, in that situation it will allocate only when needed to return a diagnostic message with source location information. 
([#490](https://github.com/hashicorp/hcl/pull/490)) +* hclwrite: New functions `TokensForTuple`, `TokensForObject`, and `TokensForFunctionCall` allow for more easily constructing the three constructs which are supported for static analysis and which HCL-based languages typically use in contexts where an expression is used only for its syntax, and not evaluated to produce a real value. For example, these new functions together are sufficient to construct all valid type constraint expressions from [the Type Expressions Extension](./ext/typeexpr/), which is the basis of variable type constraints in the Terraform language at the time of writing. ([#502](https://github.com/hashicorp/hcl/pull/502)) +* json: New functions `IsJSONExpression` and `IsJSONBody` to determine if a given expression or body was created by the JSON syntax parser. In normal situations it's better not to worry about what syntax a particular expression/body originated in, but this can be useful in some trickier cases where an application needs to shim for backwards-compatibility or for static analysis that needs to have special handling of the JSON syntax's embedded expression/template conventions. ([#524](https://github.com/hashicorp/hcl/pull/524)) + +### Bugs Fixed + +* gohcl: Fix docs about supported types for blocks. ([#507](https://github.com/hashicorp/hcl/pull/507)) + +## v2.11.1 (December 1, 2021) + +### Bugs Fixed + +* hclsyntax: The type for an upgraded unknown value with a splat expression cannot be known ([#495](https://github.com/hashicorp/hcl/pull/495)) + +## v2.11.0 (December 1, 2021) + +### Enhancements + +* hclsyntax: Various error messages related to unexpectedly reaching end of file while parsing a delimited subtree will now return specialized messages describing the opening tokens as "unclosed", instead of returning a generic diagnostic that just happens to refer to the empty source range at the end of the file. This gives better feedback when error messages are being presented alongside a source code snippet, as is common in HCL-based applications, because it shows which innermost container the parser was working on when it encountered the error. ([#492](https://github.com/hashicorp/hcl/pull/492)) + +### Bugs Fixed + +* hclsyntax: Upgrading an unknown single value to a list using a splat expression must return unknown ([#493](https://github.com/hashicorp/hcl/pull/493)) + +## v2.10.1 (July 21, 2021) + +* dynblock: Decode unknown dynamic blocks in order to obtain any diagnostics even though the decoded value is not used ([#476](https://github.com/hashicorp/hcl/pull/476)) +* hclsyntax: Calling functions is now more robust in the face of an incorrectly-implemented function which returns a `function.ArgError` whose argument index is out of range for the length of the arguments. Previously this would often lead to a panic, but now it'll return a less-precice error message instead. Functions that return out-of-bounds argument indices still ought to be fixed so that the resulting error diagnostics can be as precise as possible. ([#472](https://github.com/hashicorp/hcl/pull/472)) +* hclsyntax: Ensure marks on unknown values are maintained when processing string templates. ([#478](https://github.com/hashicorp/hcl/pull/478)) +* hcl: Improved error messages for various common error situtions in `hcl.Index` and `hcl.GetAttr`. These are part of the implementation of indexing and attribute lookup in the native syntax expression language too, so the new error messages will apply to problems using those operators. 
([#474](https://github.com/hashicorp/hcl/pull/474)) + +## v2.10.0 (April 20, 2021) + +### Enhancements + +* dynblock,hcldec: Using dynblock in conjunction with hcldec can now decode blocks with unknown dynamic for_each arguments as entirely unknown values ([#461](https://github.com/hashicorp/hcl/pull/461)) +* hclsyntax: Some syntax errors during parsing of the inside of `${` ... `}` template interpolation sequences will now produce an extra hint message about the need to escape as `$${` when trying to include interpolation syntax for other languages like shell scripting, AWS IAM policies, etc. ([#462](https://github.com/hashicorp/hcl/pull/462)) + +## v2.9.1 (March 10, 2021) + +### Bugs Fixed + +* hclsyntax: Fix panic for marked index value. ([#451](https://github.com/hashicorp/hcl/pull/451)) + +## v2.9.0 (February 23, 2021) + +### Enhancements + +* HCL's native syntax and JSON scanners -- and thus all of the other parsing components that build on top of them -- are now using Unicode 13 rules for text segmentation when counting text characters for the purpose of reporting source location columns. Previously HCL was using Unicode 12. Unicode 13 still uses the same algorithm but includes some additions to the character tables the algorithm is defined in terms of, to properly categorize new characters defined in Unicode 13. + +## v2.8.2 (January 6, 2021) + +### Bugs Fixed + +* hclsyntax: Fix panic for marked collection splat. ([#436](https://github.com/hashicorp/hcl/pull/436)) +* hclsyntax: Fix panic for marked template loops. ([#437](https://github.com/hashicorp/hcl/pull/437)) +* hclsyntax: Fix `for` expression marked conditional. ([#438](https://github.com/hashicorp/hcl/pull/438)) +* hclsyntax: Mark objects with keys that are sensitive. ([#440](https://github.com/hashicorp/hcl/pull/440)) + +## v2.8.1 (December 17, 2020) + +### Bugs Fixed + +* hclsyntax: Fix panic when expanding marked function arguments. ([#429](https://github.com/hashicorp/hcl/pull/429)) +* hclsyntax: Error when attempting to use a marked value as an object key. ([#434](https://github.com/hashicorp/hcl/pull/434)) +* hclsyntax: Error when attempting to use a marked value as an object key in expressions. ([#433](https://github.com/hashicorp/hcl/pull/433)) + +## v2.8.0 (December 7, 2020) + +### Enhancements + +* hclsyntax: Expression grouping parentheses will now be reflected by an explicit node in the AST, whereas before they were only considered during parsing. ([#426](https://github.com/hashicorp/hcl/pull/426)) + +### Bugs Fixed + +* hclwrite: The parser will now correctly include the `(` and `)` tokens when an expression is surrounded by parentheses. Previously it would incorrectly recognize those tokens as being extraneous tokens outside of the expression. ([#426](https://github.com/hashicorp/hcl/pull/426)) +* hclwrite: The formatter will now remove (rather than insert) spaces between the `!` (unary boolean "not") operator and its subsequent operand. ([#403](https://github.com/hashicorp/hcl/pull/403)) +* hclsyntax: Unmark conditional values in expressions before checking their truthfulness ([#427](https://github.com/hashicorp/hcl/pull/427)) + +## v2.7.2 (November 30, 2020) + +### Bugs Fixed + +* gohcl: Fix panic when decoding into type containing value slices. ([#335](https://github.com/hashicorp/hcl/pull/335)) +* hclsyntax: The unusual expression `null[*]` was previously always returning an unknown value, even though the rules for `[*]` normally call for it to return an empty tuple when applied to a null. 
As well as being a surprising result, it was particularly problematic because it violated the rule that a calling application may assume that an expression result will always be known unless the application itself introduces unknown values via the evaluation context. `null[*]` will now produce an empty tuple. ([#416](https://github.com/hashicorp/hcl/pull/416)) +* hclsyntax: Fix panic when traversing a list, tuple, or map with cty "marks" ([#424](https://github.com/hashicorp/hcl/pull/424)) + +## v2.7.1 (November 18, 2020) + +### Bugs Fixed + +* hclwrite: Correctly handle blank quoted string block labels, instead of dropping them ([#422](https://github.com/hashicorp/hcl/pull/422)) + +## v2.7.0 (October 14, 2020) + +### Enhancements + +* json: There is a new function `ParseWithStartPos`, which allows overriding the starting position for parsing in case the given JSON bytes are a fragment of a larger document, such as might happen when decoding with `encoding/json` into a `json.RawMessage`. ([#389](https://github.com/hashicorp/hcl/pull/389)) +* json: There is a new function `ParseExpression`, which allows parsing a JSON string directly in expression mode, whereas previously it was only possible to parse a JSON string in body mode. ([#381](https://github.com/hashicorp/hcl/pull/381)) +* hclwrite: `Block` type now supports `SetType` and `SetLabels`, allowing surgical changes to the type and labels of an existing block without having to reconstruct the entire block. ([#340](https://github.com/hashicorp/hcl/pull/340)) + +### Bugs Fixed + +* hclsyntax: Fix confusing error message for bitwise OR operator ([#380](https://github.com/hashicorp/hcl/pull/380)) +* hclsyntax: Several bug fixes for using HCL with values containing cty "marks" ([#404](https://github.com/hashicorp/hcl/pull/404), [#406](https://github.com/hashicorp/hcl/pull/404), [#407](https://github.com/hashicorp/hcl/pull/404)) + +## v2.6.0 (June 4, 2020) + +### Enhancements + +* hcldec: Add a new `Spec`, `ValidateSpec`, which allows custom validation of values at decode-time. ([#387](https://github.com/hashicorp/hcl/pull/387)) + +### Bugs Fixed + +* hclsyntax: Fix panic with combination of sequences and null arguments ([#386](https://github.com/hashicorp/hcl/pull/386)) +* hclsyntax: Fix handling of unknown values and sequences ([#386](https://github.com/hashicorp/hcl/pull/386)) + +## v2.5.1 (May 14, 2020) + +### Bugs Fixed + +* hclwrite: handle legacy dot access of numeric indexes. ([#369](https://github.com/hashicorp/hcl/pull/369)) +* hclwrite: Fix panic for dotted full splat (`foo.*`) ([#374](https://github.com/hashicorp/hcl/pull/374)) + +## v2.5.0 (May 6, 2020) + +### Enhancements + +* hclwrite: Generate multi-line objects and maps. ([#372](https://github.com/hashicorp/hcl/pull/372)) + +## v2.4.0 (Apr 13, 2020) + +### Enhancements + +* The Unicode data tables that HCL uses to produce user-perceived "column" positions in diagnostics and other source ranges are now updated to Unicode 12.0.0, which will cause HCL to produce more accurate column numbers for combining characters introduced to Unicode since Unicode 9.0.0. + +### Bugs Fixed + +* json: Fix panic when parsing malformed JSON. 
([#358](https://github.com/hashicorp/hcl/pull/358)) + ## v2.3.0 (Jan 3, 2020) ### Enhancements diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md index 3d0d509d5..9af736c9d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/README.md +++ b/vendor/github.com/hashicorp/hcl/v2/README.md @@ -33,11 +33,25 @@ package main import ( "log" + "github.com/hashicorp/hcl/v2/hclsimple" ) type Config struct { - LogLevel string `hcl:"log_level"` + IOMode string `hcl:"io_mode"` + Service ServiceConfig `hcl:"service,block"` +} + +type ServiceConfig struct { + Protocol string `hcl:"protocol,label"` + Type string `hcl:"type,label"` + ListenAddr string `hcl:"listen_addr"` + Processes []ProcessConfig `hcl:"process,block"` +} + +type ProcessConfig struct { + Type string `hcl:"type,label"` + Command []string `hcl:"command"` } func main() { diff --git a/vendor/github.com/hashicorp/hcl/v2/appveyor.yml b/vendor/github.com/hashicorp/hcl/v2/appveyor.yml deleted file mode 100644 index e382f8f57..000000000 --- a/vendor/github.com/hashicorp/hcl/v2/appveyor.yml +++ /dev/null @@ -1,13 +0,0 @@ -build: off - -clone_folder: c:\gopath\src\github.com\hashicorp\hcl - -environment: - GOPATH: c:\gopath - GO111MODULE: on - GOPROXY: https://goproxy.io - -stack: go 1.12 - -test_script: - - go test ./... diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go index c320961e1..c80535b7a 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -22,14 +22,14 @@ const ( ) // Diagnostic represents information to be presented to a user about an -// error or anomoly in parsing or evaluating configuration. +// error or anomaly in parsing or evaluating configuration. type Diagnostic struct { Severity DiagnosticSeverity // Summary and Detail contain the English-language description of the // problem. Summary is a terse description of the general problem and // detail is a more elaborate, often-multi-sentence description of - // the probem and what might be done to solve it. + // the problem and what might be done to solve it. 
Summary string Detail string diff --git a/vendor/github.com/hashicorp/hcl/v2/go.mod b/vendor/github.com/hashicorp/hcl/v2/go.mod index d80c99d9b..0529faa97 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.mod +++ b/vendor/github.com/hashicorp/hcl/v2/go.mod @@ -1,9 +1,11 @@ module github.com/hashicorp/hcl/v2 +go 1.12 + require ( github.com/agext/levenshtein v1.2.1 github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 - github.com/apparentlymart/go-textseg v1.0.0 + github.com/apparentlymart/go-textseg/v13 v13.0.0 github.com/davecgh/go-spew v1.1.1 github.com/go-test/deep v1.0.3 github.com/google/go-cmp v0.3.1 @@ -14,8 +16,8 @@ require ( github.com/sergi/go-diff v1.0.0 github.com/spf13/pflag v1.0.2 github.com/stretchr/testify v1.2.2 // indirect - github.com/zclconf/go-cty v1.2.0 + github.com/zclconf/go-cty v1.8.0 + github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 // indirect - golang.org/x/text v0.3.2 // indirect ) diff --git a/vendor/github.com/hashicorp/hcl/v2/go.sum b/vendor/github.com/hashicorp/hcl/v2/go.sum index 76b135fb4..6c1475673 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.sum +++ b/vendor/github.com/hashicorp/hcl/v2/go.sum @@ -4,11 +4,15 @@ github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhi github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -29,23 +33,30 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.2.0 h1:sPHsy7ADcIZQP3vILvTjrh74ZA175TFP5vqiNK1UmlI= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.8.0 
h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 3fe84ddc3..2706998f8 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -2,6 +2,7 @@ package hclsyntax import ( "fmt" + "sort" "sync" "github.com/hashicorp/hcl/v2" @@ -27,6 +28,32 @@ type Expression interface { // Assert that Expression implements hcl.Expression var assertExprImplExpr hcl.Expression = Expression(nil) +// ParenthesesExpr represents an expression written in grouping +// parentheses. +// +// The parser takes care of the precedence effect of the parentheses, so the +// only purpose of this separate expression node is to capture the source range +// of the parentheses themselves, rather than the source range of the +// expression within. 
All of the other expression operations just pass through +// to the underlying expression. +type ParenthesesExpr struct { + Expression + SrcRange hcl.Range +} + +var _ hcl.Expression = (*ParenthesesExpr)(nil) + +func (e *ParenthesesExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ParenthesesExpr) walkChildNodes(w internalWalkFunc) { + // We override the walkChildNodes from the embedded Expression to + // ensure that both the parentheses _and_ the content are visible + // in a walk. + w(e.Expression) +} + // LiteralValueExpr is an expression that just always returns a given value. type LiteralValueExpr struct { Val cty.Value @@ -260,6 +287,20 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } switch { + case expandVal.Type().Equals(cty.DynamicPseudoType): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + return cty.DynamicVal, diags case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): if expandVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ @@ -277,13 +318,17 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti return cty.DynamicVal, diags } + // When expanding arguments from a collection, we must first unmark + // the collection itself, and apply any marks directly to the + // elements. This ensures that marks propagate correctly. + expandVal, marks := expandVal.Unmark() newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) newArgs = append(newArgs, args[:len(args)-1]...) it := expandVal.ElementIterator() for it.Next() { _, val := it.Element() newArgs = append(newArgs, &LiteralValueExpr{ - Val: val, + Val: val.WithMarks(marks), SrcRange: expandExpr.Range(), }) } @@ -406,22 +451,72 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } else { param = varParam } - argExpr := e.Args[i] - // TODO: we should also unpick a PathError here and show the - // path to the deep value where the error was detected. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid function argument", - Detail: fmt.Sprintf( - "Invalid value for %q parameter: %s.", - param.Name, err, - ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), - Expression: argExpr, - EvalContext: ctx, - }) + if param == nil || i > len(args)-1 { + // Getting here means that the function we called has a bug: + // it returned an arg error that refers to an argument index + // that wasn't present in the call. For that situation + // we'll degrade to a less specific error just to give + // some sort of answer, but best to still fix the buggy + // function so that it only returns argument indices that + // are in range. + switch { + case param != nil: + // In this case we'll assume that the function was trying + // to talk about a final variadic parameter but the caller + // didn't actually provide any arguments for it. That means + // we can at least still name the parameter in the + // error message, but our source range will be the call + // as a whole because we don't have an argument expression + // to highlight specifically. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + default: + // This is the most degenerate case of all, where the + // index is out of range even for the declared parameters, + // and so we can't tell which parameter the function is + // trying to report an error for. Just a generic error + // report in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + } + } else { + argExpr := args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } default: diags = append(diags, &hcl.Diagnostic{ @@ -521,12 +616,8 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic Severity: hcl.DiagError, Summary: "Inconsistent conditional result types", Detail: fmt.Sprintf( - // FIXME: Need a helper function for showing natural-language type diffs, - // since this will generate some useless messages in some cases, like - // "These expressions are object and object respectively" if the - // object types don't exactly match. - "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", - trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + "The true and false result expressions must have consistent types. %s.", + describeConditionalTypeMismatch(trueResult.Type(), falseResult.Type()), ), Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), Context: &e.SrcRange, @@ -558,7 +649,7 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Incorrect condition type", - Detail: fmt.Sprintf("The condition expression must be of type bool."), + Detail: "The condition expression must be of type bool.", Subject: e.Condition.Range().Ptr(), Context: &e.SrcRange, Expression: e.Condition, @@ -567,6 +658,8 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic return cty.UnknownVal(resultType), diags } + // Unmark result before testing for truthiness + condResult, _ = condResult.UnmarkDeep() if condResult.True() { diags = append(diags, trueDiags...) if convs[0] != nil { @@ -616,6 +709,144 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic } } +// describeConditionalTypeMismatch makes a best effort to describe the +// difference between types in the true and false arms of a conditional +// expression in a way that would be useful to someone trying to understand +// why their conditional expression isn't valid. +// +// NOTE: This function is only designed to deal with situations +// where trueTy and falseTy are different. Calling it with two equal +// types will produce a nonsense result. 
This function also only really +// deals with situations that type unification can't resolve, so we should +// call this function only after trying type unification first. +func describeConditionalTypeMismatch(trueTy, falseTy cty.Type) string { + // The main tricky cases here are when both trueTy and falseTy are + // of the same structural type kind, such as both being object types + // or both being tuple types. In that case the "FriendlyName" method + // returns only "object" or "tuple" and so we need to do some more + // work to describe what's different inside them. + + switch { + case trueTy.IsObjectType() && falseTy.IsObjectType(): + // We'll first gather up the attribute names and sort them. In the + // event that there are multiple attributes that disagree across + // the two types, we'll prefer to report the one that sorts lexically + // least just so that our error message is consistent between + // evaluations. + var trueAttrs, falseAttrs []string + for name := range trueTy.AttributeTypes() { + trueAttrs = append(trueAttrs, name) + } + sort.Strings(trueAttrs) + for name := range falseTy.AttributeTypes() { + falseAttrs = append(falseAttrs, name) + } + sort.Strings(falseAttrs) + + for _, name := range trueAttrs { + if !falseTy.HasAttribute(name) { + return fmt.Sprintf("The 'true' value includes object attribute %q, which is absent in the 'false' value", name) + } + trueAty := trueTy.AttributeType(name) + falseAty := falseTy.AttributeType(name) + if !trueAty.Equals(falseAty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. + return fmt.Sprintf( + "Type mismatch for object attribute %q: %s", + name, describeConditionalTypeMismatch(trueAty, falseAty), + ) + } + } + for _, name := range falseAttrs { + if !trueTy.HasAttribute(name) { + return fmt.Sprintf("The 'false' value includes object attribute %q, which is absent in the 'true' value", name) + } + // NOTE: We don't need to check the attribute types again, because + // any attribute that both types have in common would already have + // been checked in the previous loop. + } + case trueTy.IsTupleType() && falseTy.IsTupleType(): + trueEtys := trueTy.TupleElementTypes() + falseEtys := falseTy.TupleElementTypes() + + if trueCount, falseCount := len(trueEtys), len(falseEtys); trueCount != falseCount { + return fmt.Sprintf("The 'true' tuple has length %d, but the 'false' tuple has length %d", trueCount, falseCount) + } + + // NOTE: Thanks to the condition above, we know that both tuples are + // of the same length and so they must have some differing types + // instead. + for i := range trueEtys { + trueEty := trueEtys[i] + falseEty := falseEtys[i] + + if !trueEty.Equals(falseEty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. 
+ return fmt.Sprintf( + "Type mismatch for tuple element %d: %s", + i, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + case trueTy.IsCollectionType() && falseTy.IsCollectionType(): + // For this case we're specifically interested in the situation where: + // - both collections are of the same kind, AND + // - the element types of both are either object or tuple types. + // This is just to avoid writing a useless statement like + // "The 'true' value is list of object, but the 'false' value is list of object". + // This still doesn't account for more awkward cases like collections + // of collections of structural types, but we won't let perfect be + // the enemy of the good. + trueEty := trueTy.ElementType() + falseEty := falseTy.ElementType() + if (trueTy.IsListType() && falseTy.IsListType()) || (trueTy.IsMapType() && falseTy.IsMapType()) || (trueTy.IsSetType() && falseTy.IsSetType()) { + if (trueEty.IsObjectType() && falseEty.IsObjectType()) || (trueEty.IsTupleType() && falseEty.IsTupleType()) { + noun := "collection" + switch { // NOTE: We now know that trueTy and falseTy have the same collection kind + case trueTy.IsListType(): + noun = "list" + case trueTy.IsSetType(): + noun = "set" + case trueTy.IsMapType(): + noun = "map" + } + return fmt.Sprintf( + "Mismatched %s element types: %s", + noun, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + } + + // If we don't manage any more specialized message, we'll just report + // what the two types are. + trueName := trueTy.FriendlyName() + falseName := falseTy.FriendlyName() + if trueName == falseName { + // Absolute last resort for when we have no special rule above but + // we have two types with the same friendly name anyway. This is + // the most vague of all possible messages but is reserved for + // particularly awkward cases, like lists of lists of differing tuple + // types. + return "At least one deeply-nested attribute or element is not compatible across both the 'true' and the 'false' value" + } + return fmt.Sprintf( + "The 'true' value is %s, but the 'false' value is %s", + trueTy.FriendlyName(), falseTy.FriendlyName(), + ) + +} + func (e *ConditionalExpr) Range() hcl.Range { return e.SrcRange } @@ -725,6 +956,7 @@ func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { var vals map[string]cty.Value var diags hcl.Diagnostics + var marks []cty.ValueMarks // This will get set to true if we fail to produce any of our keys, // either because they are actually unknown or if the evaluation produces @@ -762,6 +994,9 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics continue } + key, keyMarks := key.Unmark() + marks = append(marks, keyMarks) + var err error key, err = convert.Convert(key, cty.String) if err != nil { @@ -791,7 +1026,7 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics return cty.DynamicVal, diags } - return cty.ObjectVal(vals), diags + return cty.ObjectVal(vals).WithMarks(marks...), diags } func (e *ObjectConsExpr) Range() hcl.Range { @@ -921,6 +1156,7 @@ type ForExpr struct { func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { var diags hcl.Diagnostics + var marks []cty.ValueMarks collVal, collDiags := e.CollExpr.Value(ctx) diags = append(diags, collDiags...) 
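The mark-handling change repeated across these hunks follows one pattern: unmark a value before iterating or converting it, keep the cty.ValueMarks that Unmark returns, and re-apply them to the final result with WithMarks so the marks survive evaluation. A minimal, self-contained sketch of that round trip against the go-cty API (the "sensitive" mark and the literal values are illustrative, not taken from the HCL sources):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A tuple value carrying a mark; marked values refuse direct iteration.
	coll := cty.TupleVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	}).Mark("sensitive")

	// Unmark before iterating, keeping the marks that were removed.
	unmarked, marks := coll.Unmark()

	vals := make([]cty.Value, 0, unmarked.LengthInt())
	for it := unmarked.ElementIterator(); it.Next(); {
		_, v := it.Element()
		vals = append(vals, v)
	}

	// Re-apply the saved marks so they propagate to the derived value.
	result := cty.TupleVal(vals).WithMarks(marks)
	fmt.Println(result.IsMarked()) // true
}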
@@ -940,6 +1176,10 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if collVal.Type() == cty.DynamicPseudoType { return cty.DynamicVal, diags } + // Unmark collection before checking for iterability, because marked + // values cannot be iterated + collVal, collMarks := collVal.Unmark() + marks = append(marks, collMarks) if !collVal.CanIterateElements() { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1064,7 +1304,11 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } - if include.False() { + // Extract and merge marks from the include expression into the + // main set of marks + includeUnmarked, includeMarks := include.Unmark() + marks = append(marks, includeMarks) + if includeUnmarked.False() { // Skip this element continue } @@ -1109,6 +1353,9 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } + key, keyMarks := key.Unmark() + marks = append(marks, keyMarks) + val, valDiags := e.ValExpr.Value(childCtx) diags = append(diags, valDiags...) @@ -1147,7 +1394,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } } - return cty.ObjectVal(vals), diags + return cty.ObjectVal(vals).WithMarks(marks...), diags } else { // Producing a tuple @@ -1208,7 +1455,11 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } - if include.False() { + // Extract and merge marks from the include expression into the + // main set of marks + includeUnmarked, includeMarks := include.Unmark() + marks = append(marks, includeMarks) + if includeUnmarked.False() { // Skip this element continue } @@ -1223,7 +1474,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } - return cty.TupleVal(vals), diags + return cty.TupleVal(vals).WithMarks(marks...), diags } } @@ -1286,12 +1537,6 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } sourceTy := sourceVal.Type() - if sourceTy == cty.DynamicPseudoType { - // If we don't even know the _type_ of our source value yet then - // we'll need to defer all processing, since we can't decide our - // result type either. - return cty.DynamicVal, diags - } // A "special power" of splat expressions is that they can be applied // both to tuples/lists and to other values, and in the latter case @@ -1315,9 +1560,29 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + upgradedUnknown := false if autoUpgrade { + // If we're upgrading an unknown value to a tuple/list, the result + // cannot be known. Otherwise a tuple containing an unknown value will + // upgrade to a different number of elements depending on whether + // sourceVal becomes null or not. + // We record this condition here so we can process any remaining + // expression after the * to verify the result of the traversal. For + // example, it is valid to use a splat on a single object to retrieve a + // list of a single attribute, but we still need to check if that + // attribute actually exists. 
+ upgradedUnknown = !sourceVal.IsKnown() + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) sourceTy = sourceVal.Type() + } // We'll compute our result type lazily if we need it. In the normal case @@ -1359,6 +1624,9 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.UnknownVal(ty), diags } + // Unmark the collection, and save the marks to apply to the returned + // collection result + sourceVal, marks := sourceVal.Unmark() vals := make([]cty.Value, 0, sourceVal.LengthInt()) it := sourceVal.ElementIterator() if ctx == nil { @@ -1379,6 +1647,10 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } e.Item.clearValue(ctx) // clean up our temporary value + if upgradedUnknown { + return cty.DynamicVal, diags + } + if !isKnown { // We'll ingore the resultTy diagnostics in this case since they // will just be the same errors we saw while iterating above. @@ -1393,9 +1665,9 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diags = append(diags, tyDiags...) return cty.ListValEmpty(ty.ElementType()), diags } - return cty.ListVal(vals), diags + return cty.ListVal(vals).WithMarks(marks), diags default: - return cty.TupleVal(vals), diags + return cty.TupleVal(vals).WithMarks(marks), diags } } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go index 9d425115f..c3f96943d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -26,6 +26,9 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) var diags hcl.Diagnostics isKnown := true + // Maintain a set of marks for values used in the template + marks := make(cty.ValueMarks) + for _, part := range e.Parts { partVal, partDiags := part.Value(ctx) diags = append(diags, partDiags...) @@ -45,6 +48,12 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } + // Unmark the part and merge its marks into the set + unmarkedVal, partMarks := partVal.Unmark() + for k, v := range partMarks { + marks[k] = v + } + if !partVal.IsKnown() { // If any part is unknown then the result as a whole must be // unknown too. 
We'll keep on processing the rest of the parts @@ -54,7 +63,7 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } - strVal, err := convert.Convert(partVal, cty.String) + strVal, err := convert.Convert(unmarkedVal, cty.String) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -74,11 +83,15 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) buf.WriteString(strVal.AsString()) } + var ret cty.Value if !isKnown { - return cty.UnknownVal(cty.String), diags + ret = cty.UnknownVal(cty.String) + } else { + ret = cty.StringVal(buf.String()) } - return cty.StringVal(buf.String()), diags + // Apply the full set of marks to the returned value + return ret.WithMarks(marks), diags } func (e *TemplateExpr) Range() hcl.Range { @@ -139,6 +152,8 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti return cty.UnknownVal(cty.String), diags } + tuple, marks := tuple.Unmark() + allMarks := []cty.ValueMarks{marks} buf := &bytes.Buffer{} it := tuple.ElementIterator() for it.Next() { @@ -158,7 +173,7 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti continue } if val.Type() == cty.DynamicPseudoType { - return cty.UnknownVal(cty.String), diags + return cty.UnknownVal(cty.String).WithMarks(marks), diags } strVal, err := convert.Convert(val, cty.String) if err != nil { @@ -176,13 +191,17 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti continue } if !val.IsKnown() { - return cty.UnknownVal(cty.String), diags + return cty.UnknownVal(cty.String).WithMarks(marks), diags } + strVal, strValMarks := strVal.Unmark() + if len(strValMarks) > 0 { + allMarks = append(allMarks, strValMarks) + } buf.WriteString(strVal.AsString()) } - return cty.StringVal(buf.String()), diags + return cty.StringVal(buf.String()).WithMarks(allMarks...), diags } func (e *TemplateJoinExpr) Range() hcl.Range { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index f67d989e5..ec83d3dc2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -6,7 +6,7 @@ import ( "strconv" "unicode/utf8" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -76,14 +76,37 @@ Token: default: bad := p.Read() if !p.recovery { - if bad.Type == TokenOQuote { + switch bad.Type { + case TokenOQuote: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid argument name", Detail: "Argument names must not be quoted.", Subject: &bad.Range, }) - } else { + case TokenEOF: + switch end { + case TokenCBrace: + // If we're looking for a closing brace then we're parsing a block + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: &startRange, + }) + default: + // The only other "end" should itself be TokenEOF (for + // the top-level body) and so we shouldn't get here, + // but we'll return a generic error message anyway to + // be resilient. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration body", + Detail: "Found end of file before the end of this configuration body.", + Subject: &startRange, + }) + } + default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Argument or block definition required", @@ -144,8 +167,6 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { }, } } - - return nil, nil } // parseSingleAttrBody is a weird variant of ParseBody that deals with the @@ -388,12 +409,23 @@ Token: // user intent for this one, we'll skip it if we're already in // recovery mode. if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid single-argument block definition", - Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", - Subject: p.Peek().Range.Ptr(), - }) + switch p.Peek().Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: oBrace.Range.Ptr(), + Context: hcl.RangeBetween(ident.Range, oBrace.Range).Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } } p.recover(TokenCBrace) } @@ -670,6 +702,7 @@ Traversal: trav := make(hcl.Traversal, 0, 1) var firstRange, lastRange hcl.Range firstRange = p.NextRange() + lastRange = marker.Range for p.Peek().Type == TokenDot { dot := p.Read() @@ -910,7 +943,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { switch start.Type { case TokenOParen: - p.Read() // eat open paren + oParen := p.Read() // eat open paren p.PushIncludeNewlines(false) @@ -936,9 +969,19 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { p.setRecovery() } - p.Read() // eat closing paren + cParen := p.Read() // eat closing paren p.PopIncludeNewlines() + // Our parser's already taken care of the precedence effect of the + // parentheses by considering them to be a kind of "term", but we + // still need to include the parentheses in our AST so we can give + // an accurate representation of the source range that includes the + // open and closing parentheses. 
+ expr = &ParenthesesExpr{ + Expression: expr, + SrcRange: hcl.RangeBetween(oParen.Range, cParen.Range), + } + return expr, diags case TokenNumberLit: @@ -1048,12 +1091,22 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { default: var diags hcl.Diagnostics if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid expression", - Detail: "Expected the start of an expression, but found an invalid expression token.", - Subject: &start.Range, - }) + switch start.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing expression", + Detail: "Expected the start of an expression, but found the end of the file.", + Subject: &start.Range, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } } p.setRecovery() @@ -1152,13 +1205,23 @@ Token: } if sep.Type != TokenComma { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing argument separator", - Detail: "A comma is required to separate each function argument from the next.", - Subject: &sep.Range, - Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), - }) + switch sep.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated function call", + Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parethesis nesting elsewhere in this file.", + Subject: hcl.RangeBetween(name.Range, openTok.Range).Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } closeTok = p.recover(TokenCParen) break Token } @@ -1231,13 +1294,23 @@ func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { if next.Type != TokenComma { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing item separator", - Detail: "Expected a comma to mark the beginning of the next item.", - Subject: &next.Range, - Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), - }) + switch next.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated tuple constructor expression", + Detail: "There is no corresponding closing bracket before the end of the file. This may be caused by incorrect bracket nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } } close = p.recover(TokenCBrack) break @@ -1348,6 +1421,13 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { Subject: &next.Range, Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), }) + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated object constructor expression", + Detail: "There is no corresponding closing brace before the end of the file. 
This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1388,13 +1468,23 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { if next.Type != TokenComma && next.Type != TokenNewline { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute separator", - Detail: "Expected a newline or comma to mark the beginning of the next attribute.", - Subject: &next.Range, - Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), - }) + switch next.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated object constructor expression", + Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } } close = p.recover(TokenCBrace) break @@ -1650,7 +1740,7 @@ func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) var diags hcl.Diagnostics ret := &bytes.Buffer{} - var cQuote Token + var endRange hcl.Range Token: for { @@ -1658,7 +1748,7 @@ Token: switch tok.Type { case TokenCQuote: - cQuote = tok + endRange = tok.Range break Token case TokenQuotedLit: @@ -1701,6 +1791,7 @@ Token: Subject: &tok.Range, Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) + endRange = tok.Range break Token default: @@ -1713,13 +1804,14 @@ Token: Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) p.recover(TokenCQuote) + endRange = tok.Range break Token } } - return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags + return ret.String(), hcl.RangeBetween(oQuote.Range, endRange), diags } // ParseStringLiteralToken processes the given token, which must be either a diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go index eb8f7ea38..ae8805856 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go @@ -5,7 +5,7 @@ import ( "strings" "unicode" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -413,13 +413,44 @@ Token: close := p.Peek() if close.Type != TokenTemplateSeqEnd { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extra characters after interpolation expression", - Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", - Subject: &close.Range, - Context: hcl.RangeBetween(startRange, close.Range).Ptr(), - }) + switch close.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the file. 
This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + case TokenColon: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Template interpolation doesn't expect a colon at this location. Did you intend this to be a literal sequence to be processed as part of another language? If so, you can escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + default: + if (close.Type == TokenCQuote || close.Type == TokenOQuote) && end == TokenCQuote { + // We'll get here if we're processing a _quoted_ + // template and we find an errant quote inside an + // interpolation sequence, which suggests that + // the interpolation sequence is missing its terminator. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the quoted template. This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.\n\nThis can happen when you include interpolation syntax for another language, such as shell scripting, but forget to escape the interpolation start token. If this is an embedded sequence for another language, escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + } } p.recover(TokenTemplateSeqEnd) } else { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md index 3fc5f5f1b..33c152daa 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -273,7 +273,7 @@ tuple = "[" ( object = "{" ( (objectelem ("," objectelem)* ","?)? ) "}"; -objectelem = (Identifier | Expression) "=" Expression; +objectelem = (Identifier | Expression) ("=" | ":") Expression; ``` Only tuple and object values can be directly constructed via native syntax. @@ -293,18 +293,20 @@ Between the open and closing delimiters of these sequences, newline sequences are ignored as whitespace. There is a syntax ambiguity between _for expressions_ and collection values -whose first element is a reference to a variable named `for`. The -_for expression_ interpretation has priority, so to produce a tuple whose -first element is the value of a variable named `for`, or an object with a -key named `for`, use parentheses to disambiguate: +whose first element starts with an identifier named `for`. The _for expression_ +interpretation has priority, so to write a key literally named `for` +or an expression derived from a variable named `for` you must use parentheses +or quotes to disambiguate: - `[for, foo, baz]` is a syntax error. - `[(for), foo, baz]` is a tuple whose first element is the value of variable `for`. -- `{for: 1, baz: 2}` is a syntax error. -- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. -- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the +- `{for = 1, baz = 2}` is a syntax error. 
+- `{"for" = 1, baz = 2}` is an object with an attribute literally named `for`. +- `{baz = 2, for = 1}` is equivalent to the previous example, and resolves the ambiguity by reordering. +- `{(for) = 1, baz = 2}` is an object with a key with the same value as the + variable `for`. ### Template Expressions @@ -489,7 +491,7 @@ that were produced against each distinct key. - `[for v in ["a", "b"]: v]` returns `["a", "b"]`. - `[for i, v in ["a", "b"]: i]` returns `[0, 1]`. - `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`. -- `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute +- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute `a` is defined twice. - `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`. @@ -888,7 +890,7 @@ as templates. - `hello ${true}` produces the string `"hello true"` - `${""}${true}` produces the string `"true"` because there are two interpolation sequences, even though one produces an empty result. -- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because +- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because the presence of the `for` directive circumvents the unwrapping even though the final result is a single value. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go index 2f7470c77..f42ae918e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go @@ -13,17 +13,12 @@ func (b *Block) AsHCLBlock() *hcl.Block { return nil } - lastHeaderRange := b.TypeRange - if len(b.LabelRanges) > 0 { - lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] - } - return &hcl.Block{ Type: b.Type, Labels: b.Labels, Body: b.Body, - DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange), + DefRange: b.DefRange(), TypeRange: b.TypeRange, LabelRanges: b.LabelRanges, } @@ -40,7 +35,7 @@ type Body struct { hiddenBlocks map[string]struct{} SrcRange hcl.Range - EndRange hcl.Range // Final token of the body, for reporting missing items + EndRange hcl.Range // Final token of the body (zero-length range) } // Assert that *Body implements hcl.Body @@ -390,5 +385,9 @@ func (b *Block) Range() hcl.Range { } func (b *Block) DefRange() hcl.Range { - return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) + lastHeaderRange := b.TypeRange + if len(b.LabelRanges) > 0 { + lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] + } + return hcl.RangeBetween(b.TypeRange, lastHeaderRange) } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index c7ffe2073..5ef093f9a 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" ) @@ -191,8 +191,10 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { toldBadUTF8 := 0 for _, tok := range tokens { - // copy token so it's safe to point to it - tok := tok + tokRange := func() *hcl.Range { + r := tok.Range + return &r + } switch tok.Type { case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: @@ -202,7 +204,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { case TokenBitwiseAnd: suggestion = " Did you mean boolean AND (\"&&\")?" 
case TokenBitwiseOr: - suggestion = " Did you mean boolean OR (\"&&\")?" + suggestion = " Did you mean boolean OR (\"||\")?" case TokenBitwiseNot: suggestion = " Did you mean boolean NOT (\"!\")?" } @@ -211,7 +213,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Unsupported operator", Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion), - Subject: &tok.Range, + Subject: tokRange(), }) toldBitwise++ } @@ -221,7 +223,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Unsupported operator", Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.", - Subject: &tok.Range, + Subject: tokRange(), }) toldExponent++ @@ -234,7 +236,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Invalid character", Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"< 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} + +func protocolDataDynamicValue6(_ context.Context, value *tfprotov6.DynamicValue) (string, []byte) { + if value == nil { + return fileExtEmpty, nil + } + + // (tfprotov6.DynamicValue).Unmarshal() prefers JSON first, so prefer to + // output JSON if found. + if len(value.JSON) > 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go new file mode 100644 index 000000000..6c68504c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go @@ -0,0 +1,19 @@ +package logging + +import ( + "log" + "strings" + + tfaddr "github.com/hashicorp/terraform-registry-address" +) + +func ProviderLoggerName(providerAddress string) string { + provider, err := tfaddr.ParseRawProviderSourceString(providerAddress) + + if err != nil { + log.Printf("[ERROR] Error parsing provider name %q: %s", providerAddress, err) + return "" + } + + return strings.ReplaceAll(provider.Type, "-", "_") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh deleted file mode 100644 index ac9ec74b3..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# We do not run protoc under go:generate because we want to ensure that all -# dependencies of go:generate are "go get"-able for general dev environment -# usability. To compile all protobuf files in this repository, run -# "make protobuf" at the top-level. - -set -eu - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -cd "$DIR" - -protoc --proto_path=. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative tfplugin5.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go index 793eed7a8..326a58bba 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: tfplugin5.proto package tfplugin5 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -40,10 +39,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type StringKind int32 const ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go index 2a1c19ad1..ddbdea31e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: tfplugin5.proto package tfplugin5 @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // ProviderClient is the client API for Provider service. @@ -223,8 +228,8 @@ type UnsafeProviderServer interface { mustEmbedUnimplementedProviderServer() } -func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { - s.RegisterService(&_Provider_serviceDesc, srv) +func RegisterProviderServer(s grpc.ServiceRegistrar, srv ProviderServer) { + s.RegisterService(&Provider_ServiceDesc, srv) } func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -443,7 +448,10 @@ func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(inter return interceptor(ctx, in, info, handler) } -var _Provider_serviceDesc = grpc.ServiceDesc{ +// Provider_ServiceDesc is the grpc.ServiceDesc for Provider service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provider_ServiceDesc = grpc.ServiceDesc{ ServiceName: "tfplugin5.Provider", HandlerType: (*ProviderServer)(nil), Methods: []grpc.MethodDesc{ @@ -537,7 +545,7 @@ func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *V } func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Provisioner_ServiceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) if err != nil { return nil, err } @@ -613,8 +621,8 @@ type UnsafeProvisionerServer interface { mustEmbedUnimplementedProvisionerServer() } -func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { - s.RegisterService(&_Provisioner_serviceDesc, srv) +func RegisterProvisionerServer(s grpc.ServiceRegistrar, srv ProvisionerServer) { + s.RegisterService(&Provisioner_ServiceDesc, srv) } func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -692,7 +700,10 @@ func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } -var _Provisioner_serviceDesc = grpc.ServiceDesc{ +// Provisioner_ServiceDesc is the grpc.ServiceDesc for Provisioner service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provisioner_ServiceDesc = grpc.ServiceDesc{ ServiceName: "tfplugin5.Provisioner", HandlerType: (*ProvisionerServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go index c3fdd6ce4..5f9549965 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -98,6 +98,9 @@ type UpgradeResourceStateResponse struct { // // The state should be represented as a tftypes.Object, with each // attribute and nested block getting its own key and value. + // + // Terraform CLI 0.12 through 0.14 require the Msgpack field to be + // populated or an EOF error will be returned. UpgradedState *DynamicValue // Diagnostics report errors or warnings related to upgrading the diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go index 5f1acc34d..b0b6dff28 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go @@ -68,6 +68,19 @@ type Schema struct { Block *SchemaBlock } +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. +func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + // SchemaBlock represents a block in a schema. Blocks are how Terraform creates // groupings of attributes. In configurations, they don't use the equals sign // and use dynamic instead of list comprehensions. @@ -105,6 +118,51 @@ type SchemaBlock struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned. 
+func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + // SchemaAttribute represents a single attribute within a schema block. // Attributes are the fields users can set in configuration using the equals // sign, can assign to variables, can interpolate, and can use list @@ -162,6 +220,17 @@ type SchemaAttribute struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaAttribute. +// +// If SchemaAttribute is missing, nil is returned. +func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + return s.Type +} + // SchemaNestedBlock is a nested block within another block. See SchemaBlock // for more information on blocks. type SchemaNestedBlock struct { @@ -198,6 +267,39 @@ type SchemaNestedBlock struct { MaxItems int64 } +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. +func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + // SchemaNestedBlockNestingMode indicates the nesting mode for // SchemaNestedBlocks. 
The nesting mode determines the number of instances of // the block allowed, how many labels the block expects, and the data structure diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go index 47426ada4..9ec7abefb 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -2,51 +2,82 @@ package tf5server import ( "context" + "encoding/json" "errors" - "log" + "fmt" + "os" + "os/signal" "regexp" + "runtime" "strings" "sync" + "time" + "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto" + "google.golang.org/grpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tfsdklog" - tfaddr "github.com/hashicorp/terraform-registry-address" testing "github.com/mitchellh/go-testing-interface" ) -const tflogSubsystemName = "proto" - -// Global logging keys attached to all requests. -// -// Practitioners or tooling reading logs may be depending on these keys, so be -// conscious of that when changing them. const ( - // A unique ID for the RPC request - logKeyRequestID = "tf_req_id" - - // The full address of the provider, such as - // registry.terraform.io/hashicorp/random - logKeyProviderAddress = "tf_provider_addr" - - // The RPC being run, such as "ApplyResourceChange" - logKeyRPC = "tf_rpc" + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 5 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMinor uint = 2 +) - // The type of resource being operated on, such as "random_pet" - logKeyResourceType = "tf_resource_type" +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. +var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) - // The type of data source being operated on, such as "archive_file" - logKeyDataSourceType = "tf_data_source_type" +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. 
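As a brief aside, a minimal sketch (not part of this patch) of how a provider might use the new Schema.ValueType helper introduced above to build a conforming tftypes value; the schema literal and attribute names here are hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Hypothetical schema with a single computed "id" attribute.
	schema := &tfprotov5.Schema{
		Block: &tfprotov5.SchemaBlock{
			Attributes: []*tfprotov5.SchemaAttribute{
				{Name: "id", Type: tftypes.String, Computed: true},
			},
		},
	}

	// ValueType mirrors the schema as a tftypes.Object, which can then be
	// used to construct values that conform to the schema.
	objType := schema.ValueType()

	val := tftypes.NewValue(objType, map[string]tftypes.Value{
		"id": tftypes.NewValue(tftypes.String, "example-id"),
	})

	fmt.Println(val.String())
}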
+ envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) - // The protocol version being used, as a string, such as "5" - logKeyProtocolVersion = "tf_proto_version" +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept as constant over allowing server configurability + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. + grpcMaxMessageSize = 256 << 20 ) // ServeOpt is an interface for defining options that can be passed to the @@ -65,6 +96,10 @@ type ServeConfig struct { debugCh chan *plugin.ReattachConfig debugCloseCh chan struct{} + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + disableLogInitStderr bool disableLogLocation bool useLoggingSink testing.T @@ -79,8 +114,17 @@ func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { // WithDebug returns a ServeOpt that will set the server into debug mode, using // the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + in.debugCtx = ctx in.debugCh = config in.debugCloseCh = closeCh @@ -88,6 +132,47 @@ func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh }) } +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human friendly instructions. +// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c). +// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. +// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. 
+func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + // WithGoPluginLogger returns a ServeOpt that will set the logger that // go-plugin should use to log messages. func WithGoPluginLogger(logger hclog.Logger) ServeOpt { @@ -152,29 +237,68 @@ func WithLogEnvVarName(name string) ServeOpt { // modify the logger that go-plugin is using, ServeOpts can be specified to // support that. func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...ServeOpt) error { - var conf ServeConfig + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + for _, opt := range opts { err := opt.ApplyServeOpt(&conf) if err != nil { return err } } + serveConfig := &plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: 5, + ProtocolVersion: protocolVersionMajor, MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", }, Plugins: plugin.PluginSet{ "provider": &GRPCProviderPlugin{ + Name: name, + Opts: opts, GRPCProvider: serverFactory, }, }, - GRPCServer: plugin.DefaultGRPCServer, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, } + if conf.logger != nil { serveConfig.Logger = conf.logger } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) + + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + if conf.debugCh != nil { serveConfig.Test = &plugin.ServeTestConfig{ Context: conf.debugCtx, @@ -182,7 +306,78 @@ func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...S CloseCh: conf.debugCloseCh, } } - plugin.Serve(serveConfig) + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. 
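A minimal sketch (not part of this patch) of a provider main function opting into the managed debug mode documented above; the provider address and the newProviderServer constructor are hypothetical:

package main

import (
	"flag"
	"log"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
)

// newProviderServer is a hypothetical constructor for a
// tfprotov5.ProviderServer implementation.
func newProviderServer() tfprotov5.ProviderServer {
	return nil // placeholder for a real provider server
}

func main() {
	// -debug switches the provider into managed debug mode, printing the
	// TF_REATTACH_PROVIDERS instructions and stopping on os.Interrupt.
	debug := flag.Bool("debug", false, "run the provider with managed debug support")
	flag.Parse()

	var opts []tf5server.ServeOpt
	if *debug {
		opts = append(opts, tf5server.WithManagedDebug())
	}

	err := tf5server.Serve("registry.terraform.io/example/example", newProviderServer, opts...)
	if err != nil {
		log.Fatal(err)
	}
}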
+ type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via provider main function and + // human friendly, so output directly to stdout. + fmt.Printf("Provider started. To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + return nil } @@ -198,6 +393,13 @@ type server struct { useTFLogSink bool testHandle testing.T name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string } func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { @@ -230,50 +432,11 @@ func (s *server) loggingContext(ctx context.Context) context.Context { ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) } - // generate a request ID - reqID, err := uuid.GenerateUUID() - if err != nil { - reqID = "unable to assign request ID: " + err.Error() - } - - // set up the logger SDK loggers are derived from - ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.With(ctx, logKeyRequestID, reqID) - ctx = tfsdklog.With(ctx, logKeyProviderAddress, s.name) - - // set up our protocol-level subsystem logger - ctx = tfsdklog.NewSubsystem(ctx, tflogSubsystemName, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK_PROTO"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyProtocolVersion, "5") - - // set up the provider logger - ctx = tfsdklog.NewRootProviderLogger(ctx, s.tflogOpts...) 
- ctx = tflog.With(ctx, logKeyRequestID, reqID) - ctx = tflog.With(ctx, logKeyProviderAddress, s.name) - return ctx -} - -func rpcLoggingContext(ctx context.Context, rpc string) context.Context { - ctx = tfsdklog.With(ctx, logKeyRPC, rpc) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyRPC, rpc) - ctx = tflog.With(ctx, logKeyRPC, rpc) - return ctx -} + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) -func resourceLoggingContext(ctx context.Context, resource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyResourceType, resource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyResourceType, resource) - ctx = tflog.With(ctx, logKeyResourceType, resource) - return ctx -} - -func dataSourceLoggingContext(ctx context.Context, dataSource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyDataSourceType, dataSource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyDataSourceType, dataSource) - ctx = tflog.With(ctx, logKeyDataSourceType, dataSource) return ctx } @@ -301,98 +464,103 @@ func New(name string, serve tfprotov5.ProviderServer, opts ...ServeOpt) tfplugin } envVar := conf.envVar if envVar == "" { - addr, err := tfaddr.ParseRawProviderSourceString(name) - if err != nil { - log.Printf("[ERROR] Error parsing provider name %q: %s", name, err) - } else { - envVar = addr.Type - } + envVar = logging.ProviderLoggerName(name) } - envVar = strings.ReplaceAll(envVar, "-", "_") if envVar != "" { - options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv("TF_LOG_PROVIDER", envVar)) + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) } return &server{ - downstream: serve, - stopCh: make(chan struct{}), - tflogOpts: options, - tflogSDKOpts: sdkOptions, - name: name, - useTFLogSink: conf.useLoggingSink != nil, - testHandle: conf.useLoggingSink, + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, } } func (s *server) GetSchema(ctx context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "GetSchema") + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.GetProviderSchemaRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + 
logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PrepareProviderConfig(ctx context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "PrepareProviderConfig") + rpc := "PrepareProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PrepareProviderConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.PrepareProviderConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PreparedConfig", resp.PreparedConfig) ret, err := toproto.PrepareProviderConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) Configure(ctx context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Configure") + rpc := "Configure" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ConfigureProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + 
logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.Configure_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil @@ -412,228 +580,276 @@ func (s *server) stop() { } func (s *server) Stop(ctx context.Context, req *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Stop") + rpc := "Stop" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.StopProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.StopProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closing all our contexts") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closed all our contexts") + logging.ProtocolTrace(ctx, "Closed all our contexts") ret, err := toproto.Stop_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateDataSourceConfig(ctx context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateDataSourceConfig"), req.TypeName) + rpc := "ValidateDataSourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateDataSourceConfigRequest(req) if err != nil { - 
tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ValidateDataSourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.ValidateDataSourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadDataSource(ctx context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadDataSource"), req.TypeName) + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadDataSourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateResourceTypeConfig(ctx context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateResourceTypeConfig"), 
req.TypeName) + rpc := "ValidateResourceTypeConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateResourceTypeConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ValidateResourceTypeConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.ValidateResourceTypeConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "UpgradeResourceState"), req.TypeName) + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.UpgradeResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + 
logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadResource(ctx context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadResource"), req.TypeName) + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadResourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ReadResource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "PlanResourceChange"), req.TypeName) + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PlanResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", 
"ProposedNewState", r.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ApplyResourceChange"), req.TypeName) + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ApplyResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ImportResourceState(ctx context.Context, req *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ImportResourceState"), 
req.TypeName) + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ImportResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + } ret, err := toproto.ImportResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go index 1f8deed34..94addd801 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go @@ -103,7 +103,8 @@ func PlanResourceChangeRequest(in *tfplugin6.PlanResourceChange_Request) (*tfpro func PlanResourceChangeResponse(in *tfplugin6.PlanResourceChange_Response) (*tfprotov6.PlanResourceChangeResponse, error) { resp := &tfprotov6.PlanResourceChangeResponse{ - PlannedPrivate: in.PlannedPrivate, + PlannedPrivate: in.PlannedPrivate, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, } attributePaths, err := AttributePaths(in.RequiresReplace) if err != nil { @@ -143,7 +144,8 @@ func ApplyResourceChangeRequest(in *tfplugin6.ApplyResourceChange_Request) (*tfp func ApplyResourceChangeResponse(in *tfplugin6.ApplyResourceChange_Response) (*tfprotov6.ApplyResourceChangeResponse, error) { resp := &tfprotov6.ApplyResourceChangeResponse{ - Private: in.Private, + Private: in.Private, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, } diags, err := Diagnostics(in.Diagnostics) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go index 388a0193c..db720246f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go @@ -131,9 +131,7 @@ func 
SchemaObjectNestingMode(in tfplugin6.Schema_Object_NestingMode) tfprotov6.S func SchemaObject(in *tfplugin6.Schema_Object) (*tfprotov6.SchemaObject, error) { resp := &tfprotov6.SchemaObject{ - Nesting: SchemaObjectNestingMode(in.Nesting), - MinItems: in.MinItems, - MaxItems: in.MaxItems, + Nesting: SchemaObjectNestingMode(in.Nesting), } attrs, err := SchemaAttributes(in.Attributes) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh deleted file mode 100644 index ca2a04688..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# We do not run protoc under go:generate because we want to ensure that all -# dependencies of go:generate are "go get"-able for general dev environment -# usability. To compile all protobuf files in this repository, run -# "make protobuf" at the top-level. - -set -eu - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -cd "$DIR" - -protoc --proto_path=. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative tfplugin6.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go index 332ece40e..a61e1875c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 6.0 +// Terraform Plugin RPC protocol version 6.2 // -// This file defines version 6.0 of the RPC protocol. To implement a plugin +// This file defines version 6.2 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.15.6 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: tfplugin6.proto package tfplugin6 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -40,10 +39,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type StringKind int32 const ( @@ -1476,8 +1471,13 @@ type Schema_Object struct { Attributes []*Schema_Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` Nesting Schema_Object_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_Object_NestingMode" json:"nesting,omitempty"` - MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + // + // Deprecated: Do not use. 
+ MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + // Deprecated: Do not use. + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` } func (x *Schema_Object) Reset() { @@ -1526,6 +1526,7 @@ func (x *Schema_Object) GetNesting() Schema_Object_NestingMode { return Schema_Object_INVALID } +// Deprecated: Do not use. func (x *Schema_Object) GetMinItems() int64 { if x != nil { return x.MinItems @@ -1533,6 +1534,7 @@ func (x *Schema_Object) GetMinItems() int64 { return 0 } +// Deprecated: Do not use. func (x *Schema_Object) GetMaxItems() int64 { if x != nil { return x.MaxItems @@ -2417,6 +2419,18 @@ type PlanResourceChange_Response struct { RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` } func (x *PlanResourceChange_Response) Reset() { @@ -2479,6 +2493,13 @@ func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + type ApplyResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2574,6 +2595,18 @@ type ApplyResourceChange_Response struct { NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` } func (x *ApplyResourceChange_Response) Reset() { @@ -2629,6 +2662,13 @@ func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + type ImportResourceState_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2970,7 +3010,7 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8d, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, @@ -3035,7 +3075,7 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, - 0x83, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, + 0x8b, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, @@ -3043,328 +3083,334 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, - 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, - 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, - 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0xd0, 0x04, 0x0a, 0x11, 0x47, 0x65, 0x74, 
0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xaf, 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, + 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, + 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0xd0, 0x04, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 
0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xaf, + 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, - 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 
0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, - 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, - 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, + 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, - 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 
0x69, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, 0x01, - 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, - 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, + 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, + 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, - 0x72, 0x65, 
0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, + 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, - 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, - 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xc4, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, - 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, - 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, - 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xef, 0x01, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe4, - 0x03, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, + 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, + 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, - 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, - 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, + 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, - 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, - 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, + 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 
0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, + 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, - 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, - 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xcc, 0x09, 0x0a, 0x08, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, + 0xcc, 0x09, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 
0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, - 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, - 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 
0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, + 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, + 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 
0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, + 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, + 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 
0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto index 9986f780a..da5e58eaf 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 6.0 +// Terraform Plugin RPC protocol version 6.2 // -// This file defines version 6.0 of the RPC protocol. To implement a plugin +// This file defines version 6.2 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -128,13 +128,16 @@ message Schema { repeated Attribute attributes = 1; NestingMode nesting = 3; - int64 min_items = 4; - int64 max_items = 5; + + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + int64 min_items = 4 [deprecated = true]; + int64 max_items = 5 [deprecated = true]; } // The version of the schema. // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. + // state when the schema is changed. int64 version = 1; // Block is the top level configuration block for this schema. @@ -262,15 +265,28 @@ message PlanResourceChange { DynamicValue prior_state = 2; DynamicValue proposed_new_state = 3; DynamicValue config = 4; - bytes prior_private = 5; + bytes prior_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue planned_state = 1; repeated AttributePath requires_replace = 2; - bytes planned_private = 3; + bytes planned_private = 3; repeated Diagnostic diagnostics = 4; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; } } @@ -280,13 +296,26 @@ message ApplyResourceChange { DynamicValue prior_state = 2; DynamicValue planned_state = 3; DynamicValue config = 4; - bytes planned_private = 5; + bytes planned_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue new_state = 1; - bytes private = 2; + bytes private = 2; repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; } } @@ -318,4 +347,4 @@ message ReadDataSource { DynamicValue state = 1; repeated Diagnostic diagnostics = 2; } -} \ No newline at end of file +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go index 957e9d239..a22d32dc9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: tfplugin6.proto package tfplugin6 diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go index e86988849..bb0968185 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go @@ -101,7 +101,8 @@ func PlanResourceChange_Request(in *tfprotov6.PlanResourceChangeRequest) (*tfplu func PlanResourceChange_Response(in *tfprotov6.PlanResourceChangeResponse) (*tfplugin6.PlanResourceChange_Response, error) { resp := &tfplugin6.PlanResourceChange_Response{ - PlannedPrivate: in.PlannedPrivate, + PlannedPrivate: in.PlannedPrivate, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck } requiresReplace, err := AttributePaths(in.RequiresReplace) if err != nil { @@ -141,7 +142,8 @@ func ApplyResourceChange_Request(in *tfprotov6.ApplyResourceChangeRequest) (*tfp func ApplyResourceChange_Response(in *tfprotov6.ApplyResourceChangeResponse) (*tfplugin6.ApplyResourceChange_Response, error) { resp := &tfplugin6.ApplyResourceChange_Response{ - Private: in.Private, + Private: in.Private, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck } diags, err := Diagnostics(in.Diagnostics) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go index f9f721a15..f3dfcb97d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go @@ -127,9 +127,7 @@ func Schema_Object_NestingMode(in tfprotov6.SchemaObjectNestingMode) tfplugin6.S func Schema_Object(in *tfprotov6.SchemaObject) (*tfplugin6.Schema_Object, error) { resp := &tfplugin6.Schema_Object{ - Nesting: Schema_Object_NestingMode(in.Nesting), - MinItems: in.MinItems, - MaxItems: in.MaxItems, + Nesting: Schema_Object_NestingMode(in.Nesting), } attrs, err := Schema_Attributes(in.Attributes) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go index a7e139fed..c17e2ccee 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go @@ -96,6 +96,19 @@ type Schema struct { Block *SchemaBlock } +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. +func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + // SchemaBlock represents a block in a schema. Blocks are how Terraform creates // groupings of attributes. In configurations, they don't use the equals sign // and use dynamic instead of list comprehensions. @@ -133,6 +146,51 @@ type SchemaBlock struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned. 
+func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + // SchemaAttribute represents a single attribute within a schema block. // Attributes are the fields users can set in configuration using the equals // sign, can assign to variables, can interpolate, and can use list @@ -194,6 +252,22 @@ type SchemaAttribute struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaAttribute. +// +// If SchemaAttribute is missing, nil is returned. +func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + // It is not valid to set both NestedType and Type. + if s.NestedType != nil { + return s.NestedType.ValueType() + } + + return s.Type +} + // SchemaNestedBlock is a nested block within another block. See SchemaBlock // for more information on blocks. type SchemaNestedBlock struct { @@ -230,6 +304,39 @@ type SchemaNestedBlock struct { MaxItems int64 } +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. +func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + // SchemaNestedBlockNestingMode indicates the nesting mode for // SchemaNestedBlocks. The nesting mode determines the number of instances of // the block allowed, how many labels the block expects, and the data structure @@ -260,14 +367,54 @@ type SchemaObject struct { Attributes []*SchemaAttribute Nesting SchemaObjectNestingMode +} - // MinItems is the minimum number of instances of this type that a - // user must specify or Terraform will return an error. - MinItems int64 +// ValueType returns the tftypes.Type for a SchemaObject. +// +// If SchemaObject is missing or the Nesting mode is invalid, nil is returned. +func (s *SchemaObject) ValueType() tftypes.Type { + if s == nil { + return nil + } - // MaxItems is the maximum number of instances of this type that a - // user may specify before Terraform returns an error. 
- MaxItems int64 + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + objectType := tftypes.Object{ + AttributeTypes: attributeTypes, + } + + switch s.Nesting { + case SchemaObjectNestingModeList: + return tftypes.List{ + ElementType: objectType, + } + case SchemaObjectNestingModeMap: + return tftypes.Map{ + ElementType: objectType, + } + case SchemaObjectNestingModeSet: + return tftypes.Set{ + ElementType: objectType, + } + case SchemaObjectNestingModeSingle: + return objectType + default: + return nil + } } // SchemaObjectNestingMode indicates the nesting mode for diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go index 4fd0b6eba..4ed9ece60 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -2,51 +2,82 @@ package tf6server import ( "context" + "encoding/json" "errors" - "log" + "fmt" + "os" + "os/signal" "regexp" + "runtime" "strings" "sync" + "time" + "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto" + "google.golang.org/grpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tfsdklog" - tfaddr "github.com/hashicorp/terraform-registry-address" testing "github.com/mitchellh/go-testing-interface" ) -const tflogSubsystemName = "proto" - -// Global logging keys attached to all requests. -// -// Practitioners or tooling reading logs may be depending on these keys, so be -// conscious of that when changing them. const ( - // A unique ID for the RPC request - logKeyRequestID = "tf_req_id" - - // The full address of the provider, such as - // registry.terraform.io/hashicorp/random - logKeyProviderAddress = "tf_provider_addr" - - // The RPC being run, such as "ApplyResourceChange" - logKeyRPC = "tf_rpc" + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 6 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. 
+ protocolVersionMinor uint = 0 +) - // The type of resource being operated on, such as "random_pet" - logKeyResourceType = "tf_resource_type" +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. +var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) - // The type of data source being operated on, such as "archive_file" - logKeyDataSourceType = "tf_data_source_type" +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. + envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) - // The protocol version being used, as a string, such as "6" - logKeyProtocolVersion = "tf_proto_version" +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept as constant over allowing server configurability + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. + grpcMaxMessageSize = 256 << 20 ) // ServeOpt is an interface for defining options that can be passed to the @@ -65,6 +96,10 @@ type ServeConfig struct { debugCh chan *plugin.ReattachConfig debugCloseCh chan struct{} + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + disableLogInitStderr bool disableLogLocation bool useLoggingSink testing.T @@ -79,8 +114,17 @@ func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { // WithDebug returns a ServeOpt that will set the server into debug mode, using // the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + in.debugCtx = ctx in.debugCh = config in.debugCloseCh = closeCh @@ -88,6 +132,47 @@ func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh }) } +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human friendly instructions. 
+// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c). +// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. +// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. +func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + // WithGoPluginLogger returns a ServeOpt that will set the logger that // go-plugin should use to log messages. func WithGoPluginLogger(logger hclog.Logger) ServeOpt { @@ -152,16 +237,22 @@ func WithLogEnvVarName(name string) ServeOpt { // modify the logger that go-plugin is using, ServeOpts can be specified to // support that. func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...ServeOpt) error { - var conf ServeConfig + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + for _, opt := range opts { err := opt.ApplyServeOpt(&conf) if err != nil { return err } } + serveConfig := &plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: 6, + ProtocolVersion: protocolVersionMajor, MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", }, @@ -172,11 +263,42 @@ func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...S Name: name, }, }, - GRPCServer: plugin.DefaultGRPCServer, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, } + if conf.logger != nil { serveConfig.Logger = conf.logger } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) 
+ + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + if conf.debugCh != nil { serveConfig.Test = &plugin.ServeTestConfig{ Context: conf.debugCtx, @@ -184,7 +306,78 @@ func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...S CloseCh: conf.debugCloseCh, } } - plugin.Serve(serveConfig) + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. + type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via provider main function and + // human friendly, so output directly to stdout. + fmt.Printf("Provider started. To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + return nil } @@ -200,6 +393,13 @@ type server struct { useTFLogSink bool testHandle testing.T name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string } func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { @@ -232,50 +432,11 @@ func (s *server) loggingContext(ctx context.Context) context.Context { ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) } - // generate a request ID - reqID, err := uuid.GenerateUUID() - if err != nil { - reqID = "unable to assign request ID: " + err.Error() - } - - // set up the logger SDK loggers are derived from - ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK"), - }, s.tflogSDKOpts...)...) 
- ctx = tfsdklog.With(ctx, logKeyRequestID, reqID) - ctx = tfsdklog.With(ctx, logKeyProviderAddress, s.name) - - // set up our protocol-level subsystem logger - ctx = tfsdklog.NewSubsystem(ctx, tflogSubsystemName, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK_PROTO"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyProtocolVersion, "6") - - // set up the provider logger - ctx = tfsdklog.NewRootProviderLogger(ctx, s.tflogOpts...) - ctx = tflog.With(ctx, logKeyRequestID, reqID) - ctx = tflog.With(ctx, logKeyProviderAddress, s.name) - return ctx -} - -func rpcLoggingContext(ctx context.Context, rpc string) context.Context { - ctx = tfsdklog.With(ctx, logKeyRPC, rpc) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyRPC, rpc) - ctx = tflog.With(ctx, logKeyRPC, rpc) - return ctx -} + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) -func resourceLoggingContext(ctx context.Context, resource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyResourceType, resource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyResourceType, resource) - ctx = tflog.With(ctx, logKeyResourceType, resource) - return ctx -} - -func dataSourceLoggingContext(ctx context.Context, dataSource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyDataSourceType, dataSource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyDataSourceType, dataSource) - ctx = tflog.With(ctx, logKeyDataSourceType, dataSource) return ctx } @@ -303,97 +464,101 @@ func New(name string, serve tfprotov6.ProviderServer, opts ...ServeOpt) tfplugin } envVar := conf.envVar if envVar == "" { - addr, err := tfaddr.ParseRawProviderSourceString(name) - if err != nil { - log.Printf("[ERROR] Error parsing provider name %q: %s", name, err) - } else { - envVar = addr.Type - } + envVar = logging.ProviderLoggerName(name) } - envVar = strings.ReplaceAll(envVar, "-", "_") if envVar != "" { - options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv("TF_LOG_PROVIDER", envVar)) + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) } return &server{ - downstream: serve, - stopCh: make(chan struct{}), - tflogOpts: options, - tflogSDKOpts: sdkOptions, - name: name, - useTFLogSink: conf.useLoggingSink != nil, - testHandle: conf.useLoggingSink, + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, } } func (s *server) GetProviderSchema(ctx context.Context, req *tfplugin6.GetProviderSchema_Request) (*tfplugin6.GetProviderSchema_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "GetProviderSchema") + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.GetProviderSchemaRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, 
tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ConfigureProvider(ctx context.Context, req *tfplugin6.ConfigureProvider_Request) (*tfplugin6.ConfigureProvider_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "ConfigureProvider") + rpc := "ConfigureProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ConfigureProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.Configure_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateProviderConfig(ctx context.Context, req *tfplugin6.ValidateProviderConfig_Request) (*tfplugin6.ValidateProviderConfig_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "ValidateProviderConfig") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + rpc := "ValidateProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateProviderConfigRequest(req) if err != nil { - 
tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ValidateProviderConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.ValidateProviderConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil @@ -413,228 +578,276 @@ func (s *server) stop() { } func (s *server) Stop(ctx context.Context, req *tfplugin6.StopProvider_Request) (*tfplugin6.StopProvider_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Stop") + rpc := "StopProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.StopProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.StopProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closing all our contexts") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closed all our contexts") + logging.ProtocolTrace(ctx, "Closed all our contexts") ret, err := toproto.Stop_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateDataResourceConfig(ctx context.Context, req *tfplugin6.ValidateDataResourceConfig_Request) (*tfplugin6.ValidateDataResourceConfig_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateDataResourceConfig"), req.TypeName) + rpc := "ValidateDataResourceConfig" + ctx = 
s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateDataResourceConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ValidateDataResourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.ValidateDataResourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadDataSource(ctx context.Context, req *tfplugin6.ReadDataSource_Request) (*tfplugin6.ReadDataSource_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadDataSource"), req.TypeName) + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadDataSourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting 
response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateResourceConfig(ctx context.Context, req *tfplugin6.ValidateResourceConfig_Request) (*tfplugin6.ValidateResourceConfig_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateResourceConfig"), req.TypeName) + rpc := "ValidateResourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateResourceConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ValidateResourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") ret, err := toproto.ValidateResourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin6.UpgradeResourceState_Request) (*tfplugin6.UpgradeResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "UpgradeResourceState"), req.TypeName) + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.UpgradeResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return 
nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadResource(ctx context.Context, req *tfplugin6.ReadResource_Request) (*tfplugin6.ReadResource_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadResource"), req.TypeName) + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadResourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ReadResource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin6.PlanResourceChange_Request) (*tfplugin6.PlanResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "PlanResourceChange"), req.TypeName) + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PlanResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting 
request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", r.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin6.ApplyResourceChange_Request) (*tfplugin6.ApplyResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ApplyResourceChange"), req.TypeName) + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ApplyResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) +
logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ImportResourceState(ctx context.Context, req *tfplugin6.ImportResourceState_Request) (*tfplugin6.ImportResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ImportResourceState"), req.TypeName) + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ImportResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolTrace(ctx, "Calling downstream") resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + logging.ProtocolTrace(ctx, "Called downstream") + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + } ret, err := toproto.ImportResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go index fd91f96a5..d6108d4ab 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go @@ -284,9 +284,9 @@ type AttributePathStepper interface { ApplyTerraform5AttributePathStep(AttributePathStep) (interface{}, error) } -// WalkAttributePath will return the value that `path` is pointing to, using -// `in` as the root. If an error is returned, the AttributePath returned will -// indicate the steps that remained to be applied when the error was +// WalkAttributePath will return the Type or Value that `path` is pointing to, +// using `in` as the root. If an error is returned, the AttributePath returned +// will indicate the steps that remained to be applied when the error was // encountered. // // map[string]interface{} and []interface{} types have built-in support. 
Other diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go index dc89026c5..328b1adf6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go @@ -283,6 +283,9 @@ func (val1 Value) Diff(val2 Value) ([]ValueDiff, error) { // if we have the same keys, we can just let recursion // from the walk check the sub-values match return true, nil + case value1.Type().Is(DynamicPseudoType): + // Let recursion from the walk check the sub-values match + return true, nil } return false, fmt.Errorf("unexpected type %v in Diff at %s", value1.Type(), path) }) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go index a633ed1e0..a0bc2f370 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go @@ -15,6 +15,23 @@ type List struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a List, +// returning the Type found at that AttributePath within the List. If the +// AttributePathStep cannot be applied to the List, an ErrInvalidStep error +// will be returned. +func (l List) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 { + return nil, ErrInvalidStep + } + + return l.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Lists are exactly equal. Unlike Is, passing in // a List with no ElementType will always return false. func (l List) Equal(o Type) bool { @@ -70,9 +87,7 @@ func valueFromList(typ Type, in interface{}) (Value, error) { case []Value: var valType Type for pos, v := range value { - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) } if valType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go index 9188df88f..7c1e7d05a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go @@ -16,6 +16,19 @@ type Map struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Map, +// returning the Type found at that AttributePath within the Map. If the +// AttributePathStep cannot be applied to the Map, an ErrInvalidStep error +// will be returned. +func (m Map) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyString: + return m.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Maps are exactly equal. Unlike Is, passing in // a Map with no ElementType will always return false. 
func (m Map) Equal(o Type) bool { @@ -89,9 +102,7 @@ func valueFromMap(typ Type, in interface{}) (Value, error) { var elType Type for _, k := range keys { v := value[k] - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyString(k).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyString(k).NewErrorf("can't use %s as %s", v.Type(), typ) } if elType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go index 2c9dde76d..ce2bf1325 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go @@ -24,6 +24,12 @@ type Object struct { // are considered part of the type signature, and their absence means a // value is no longer of that type. // + // OptionalAttributes is only valid when declaring a type constraint + // (e.g. Schema) and should not be used as part of a Type when creating + // a Value (e.g. NewValue()). When creating a Value, all OptionalAttributes + // must still be defined in the Object by setting each attribute to a null + // or known value for its attribute type. + // // The key of OptionalAttributes should be the name of the attribute // that is optional. The value should be an empty struct, used only to // indicate presence. @@ -38,6 +44,29 @@ type Object struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to an Object, +// returning the Type found at that AttributePath within the Object. If the +// AttributePathStep cannot be applied to the Object, an ErrInvalidStep error +// will be returned. +func (o Object) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case AttributeName: + if len(o.AttributeTypes) == 0 { + return nil, ErrInvalidStep + } + + attrType, ok := o.AttributeTypes[string(s)] + + if !ok { + return nil, ErrInvalidStep + } + + return attrType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Objects are exactly equal. Unlike Is, passing // in an Object with no AttributeTypes will always return false. func (o Object) Equal(other Type) bool { @@ -183,9 +212,7 @@ func valueFromObject(types map[string]Type, optionalAttrs map[string]struct{}, i if v.Type() == nil { return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("missing value type") } - if v.Type().Is(DynamicPseudoType) && v.IsKnown() && !v.IsNull() { - return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("can't use %s as %s", v.Type(), typ) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go index ff0f14e60..0349f4145 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go @@ -37,6 +37,12 @@ type primitive struct { _ []struct{} } +// ApplyTerraform5AttributePathStep always returns an ErrInvalidStep error +// as it is invalid to step into a primitive. 
+func (p primitive) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + return nil, ErrInvalidStep +} + func (p primitive) Equal(o Type) bool { v, ok := o.(primitive) if !ok { @@ -61,7 +67,7 @@ func (p primitive) UsableAs(t Type) bool { } func (p primitive) String() string { - return "tftypes." + string(p.name) + return "tftypes." + p.name } func (p primitive) private() {} @@ -102,7 +108,16 @@ func (p primitive) supportedGoTypes() []string { case Bool.name: return []string{"bool", "*bool"} case DynamicPseudoType.name: - return []string{"nil", "UnknownValue"} + // List/Set is covered by Tuple, Map is covered by Object + possibleTypes := []Type{ + String, Bool, Number, + Tuple{}, Object{}, + } + results := []string{} + for _, t := range possibleTypes { + results = append(results, t.supportedGoTypes()...) + } + return results } panic(fmt.Sprintf("unknown primitive type %q", p.name)) } @@ -346,3 +361,45 @@ func valueFromNumber(in interface{}) (Value, error) { return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Number; expected types are: %s", in, formattedSupportedGoTypes(Number)) } } + +func valueFromDynamicPseudoType(val interface{}) (Value, error) { + switch val := val.(type) { + case string, *string: + v, err := valueFromString(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case *big.Float, float64, *float64, int, *int, int8, *int8, int16, *int16, int32, *int32, int64, *int64, uint, *uint, uint8, *uint8, uint16, *uint16, uint32, *uint32, uint64, *uint64: + v, err := valueFromNumber(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case bool, *bool: + v, err := valueFromBool(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case map[string]Value: + v, err := valueFromObject(nil, nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case []Value: + v, err := valueFromTuple(nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.DynamicPseudoType; expected types are: %s", val, formattedSupportedGoTypes(DynamicPseudoType)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go index 3bd3e8a67..00163067e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go @@ -15,6 +15,19 @@ type Set struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Set, +// returning the Type found at that AttributePath within the Set. If the +// AttributePathStep cannot be applied to the Set, an ErrInvalidStep error +// will be returned. +func (s Set) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyValue: + return s.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Sets are exactly equal. Unlike Is, passing in // a Set with no ElementType will always return false. 
func (s Set) Equal(o Type) bool { @@ -70,9 +83,7 @@ func valueFromSet(typ Type, in interface{}) (Value, error) { case []Value: var elType Type for _, v := range value { - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyValue(v).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyValue(v).NewErrorf("can't use %s as %s", v.Type(), typ) } if elType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go index 088243bbd..5ca5709a3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go @@ -19,6 +19,23 @@ type Tuple struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Tuple, +// returning the Type found at that AttributePath within the Tuple. If the +// AttributePathStep cannot be applied to the Tuple, an ErrInvalidStep error +// will be returned. +func (tu Tuple) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 || int64(s) >= int64(len(tu.ElementTypes)) { + return nil, ErrInvalidStep + } + + return tu.ElementTypes[int64(s)], nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Tuples are exactly equal. Unlike Is, passing // in a Tuple with no ElementTypes will always return false. func (tu Tuple) Equal(o Type) bool { @@ -109,9 +126,7 @@ func valueFromTuple(types []Type, in interface{}) (Value, error) { } for pos, v := range value { typ := types[pos] - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go index 955e322fc..4f0e4af7c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go @@ -12,6 +12,12 @@ import ( // implemented by the tftypes package. Types define the shape and // characteristics of data coming from or being sent to Terraform. type Type interface { + // AttributePathStepper requires each Type to implement the + // ApplyTerraform5AttributePathStep method, so Type is compatible with + // WalkAttributePath. The method should return the Type found at that + // AttributePath within the Type or ErrInvalidStep. + AttributePathStepper + // Is is used to determine what type a Type implementation is. It is // the recommended method for determining whether two types are // equivalent or not. 
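For context on the tftypes hunks above: Object, Set, Tuple, and the primitive types now satisfy the AttributePathStepper requirement added to the Type interface, and DynamicPseudoType value creation is now permitted for concrete values. The following is a minimal, hypothetical usage sketch and is not part of the patch; it relies only on the method, type, and error names visible in the hunks above, and the resource/attribute names are invented.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	objType := tftypes.Object{
		AttributeTypes: map[string]tftypes.Type{
			"tags": tftypes.Map{ElementType: tftypes.String},
		},
	}

	// Stepping into a known attribute returns that attribute's type.
	tagsType, err := objType.ApplyTerraform5AttributePathStep(tftypes.AttributeName("tags"))
	if err != nil {
		panic(err)
	}
	fmt.Println(tagsType)

	// Stepping into a primitive always fails with ErrInvalidStep, per the
	// primitive implementation added above.
	_, err = tftypes.String.ApplyTerraform5AttributePathStep(tftypes.AttributeName("anything"))
	fmt.Println(err == tftypes.ErrInvalidStep)

	// With valueFromDynamicPseudoType, a concrete value can now be created
	// against the DynamicPseudoType constraint instead of returning an error.
	val := tftypes.NewValue(tftypes.DynamicPseudoType, "hello")
	fmt.Println(val.Type().Is(tftypes.DynamicPseudoType))
}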
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go index 6166cb01b..4d7b74989 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go @@ -2,14 +2,13 @@ package tftypes import ( "bytes" - "errors" "fmt" "math/big" "sort" "strconv" "strings" - "github.com/vmihailenco/msgpack" + msgpack "github.com/vmihailenco/msgpack/v4" ) // ValueConverter is an interface that provider-defined types can implement to @@ -296,10 +295,6 @@ func newValue(t Type, val interface{}) (Value, error) { }, nil } - if t.Is(DynamicPseudoType) { - return Value{}, errors.New("cannot have DynamicPseudoType with known value, DynamicPseudoType can only contain null or unknown values") - } - if creator, ok := val.(ValueCreator); ok { var err error val, err = creator.ToTerraform5Value() @@ -357,6 +352,12 @@ func newValue(t Type, val interface{}) (Value, error) { return Value{}, err } return v, nil + case t.Is(DynamicPseudoType): + v, err := valueFromDynamicPseudoType(val) + if err != nil { + return Value{}, err + } + return v, nil default: return Value{}, fmt.Errorf("unknown type %s passed to tftypes.NewValue", t) } @@ -439,7 +440,7 @@ func (val Value) As(dst interface{}) error { if !ok { return fmt.Errorf("can't unmarshal %s into %T, expected *big.Float", val.Type(), dst) } - target.Set(v) + target.Copy(v) return nil case **big.Float: if val.IsNull() { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go index c96401adf..308434776 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go @@ -62,7 +62,7 @@ func jsonUnmarshal(buf []byte, typ Type, p *AttributePath) (Value, error) { return Value{}, p.NewErrorf("unknown type %s", typ) } -func jsonUnmarshalString(buf []byte, typ Type, p *AttributePath) (Value, error) { +func jsonUnmarshalString(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() @@ -98,7 +98,7 @@ func jsonUnmarshalNumber(buf []byte, typ Type, p *AttributePath) (Value, error) } return NewValue(typ, f), nil case string: - f, _, err := big.ParseFloat(string(numTok), 10, 512, big.ToNearestEven) + f, _, err := big.ParseFloat(numTok, 10, 512, big.ToNearestEven) if err != nil { return Value{}, p.NewErrorf("error parsing number: %w", err) } @@ -107,7 +107,7 @@ func jsonUnmarshalNumber(buf []byte, typ Type, p *AttributePath) (Value, error) return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Number) } -func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { +func jsonUnmarshalBool(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() if err != nil { @@ -123,7 +123,7 @@ func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { case "false", "0": return NewValue(Bool, false), nil } - switch strings.ToLower(string(v)) { + switch strings.ToLower(v) { case "true": return Value{}, p.NewErrorf("to convert from string, use lowercase \"true\"") case "false": @@ -140,7 +140,7 @@ func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Bool) } -func jsonUnmarshalDynamicPseudoType(buf []byte, typ 
Type, p *AttributePath) (Value, error) { +func jsonUnmarshalDynamicPseudoType(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() if err != nil { @@ -355,20 +355,8 @@ func jsonUnmarshalMap(buf []byte, attrType Type, p *AttributePath) (Value, error return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) } - elTyp := attrType - if attrType.Is(DynamicPseudoType) { - var elements []Value - for _, val := range vals { - elements = append(elements, val) - } - elTyp, err = TypeFromElements(elements) - if err != nil { - return Value{}, p.NewErrorf("invalid elements for map: %w", err) - } - } - return NewValue(Map{ - ElementType: elTyp, + ElementType: attrType, }, vals), nil } @@ -393,7 +381,6 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu // while generally in Go it's undesirable to treat empty and nil slices // separately, in this case we're surfacing a non-Go-in-origin // distinction, so we'll allow it. - types := []Type{} vals := []Value{} var idx int @@ -415,7 +402,6 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu if err != nil { return Value{}, err } - types = append(types, val.Type()) vals = append(vals, val) } @@ -432,7 +418,7 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu } return NewValue(Tuple{ - ElementTypes: types, + ElementTypes: elementTypes, }, vals), nil } @@ -447,7 +433,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) } - types := map[string]Type{} vals := map[string]Value{} for dec.More() { innerPath := p.WithElementKeyValue(NewValue(String, UnknownValue)) @@ -474,7 +459,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath if err != nil { return Value{}, err } - types[key] = val.Type() vals[key] = val } @@ -494,6 +478,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath } return NewValue(Object{ - AttributeTypes: types, + AttributeTypes: attrTypes, }, vals), nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go index 3f3db12ef..c4047aeab 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go @@ -7,8 +7,8 @@ import ( "math/big" "sort" - "github.com/vmihailenco/msgpack" - msgpackCodes "github.com/vmihailenco/msgpack/codes" + msgpack "github.com/vmihailenco/msgpack/v4" + msgpackCodes "github.com/vmihailenco/msgpack/v4/codes" ) type msgPackUnknownType struct{} @@ -74,7 +74,7 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil } switch peek { case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: @@ -82,19 +82,19 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil case 
msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: rv, err := dec.DecodeUint64() if err != nil { return Value{}, path.NewErrorf("couldn't decode number as uint64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetUint64(rv)), nil case msgpackCodes.Float, msgpackCodes.Double: rv, err := dec.DecodeFloat64() if err != nil { return Value{}, path.NewErrorf("couldn't decode number as float64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, big.NewFloat(rv)), nil default: rv, err := dec.DecodeString() if err != nil { @@ -239,24 +239,8 @@ func msgpackUnmarshalMap(dec *msgpack.Decoder, typ Type, path *AttributePath) (V vals[key] = val } - elTyp := typ - - if typ.Is(DynamicPseudoType) { - var elements []Value - - for _, val := range vals { - elements = append(elements, val) - } - - elTyp, err = TypeFromElements(elements) - - if err != nil { - return Value{}, path.NewErrorf("invalid elements for map: %w", err) - } - } - return NewValue(Map{ - ElementType: elTyp, + ElementType: typ, }, vals), nil } @@ -275,7 +259,6 @@ func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []Type, path *AttributePa return Value{}, path.NewErrorf("error decoding tuple; expected %d items, got %d", len(types), length) } - elTypes := make([]Type, 0, length) vals := make([]Value, 0, length) for i := 0; i < length; i++ { innerPath := path.WithElementKeyInt(i) @@ -284,12 +267,11 @@ func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []Type, path *AttributePa if err != nil { return Value{}, err } - elTypes = append(elTypes, val.Type()) vals = append(vals, val) } return NewValue(Tuple{ - ElementTypes: elTypes, + ElementTypes: types, }, vals), nil } @@ -308,7 +290,6 @@ func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]Type, path *A return Value{}, path.NewErrorf("error decoding object; expected %d attributes, got %d", len(types), length) } - attrTypes := make(map[string]Type, length) vals := make(map[string]Value, length) for i := 0; i < length; i++ { key, err := dec.DecodeString() @@ -324,12 +305,11 @@ func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]Type, path *A if err != nil { return Value{}, err } - attrTypes[key] = val.Type() vals[key] = val } return NewValue(Object{ - AttributeTypes: attrTypes, + AttributeTypes: types, }, vals), nil } @@ -397,7 +377,7 @@ func marshalMsgPack(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) return fmt.Errorf("unknown type %s", typ) } -func marshalMsgPackDynamicPseudoType(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error { +func marshalMsgPackDynamicPseudoType(val Value, _ Type, p *AttributePath, enc *msgpack.Encoder) error { typeJSON, err := val.Type().MarshalJSON() if err != nil { return p.NewErrorf("error generating JSON for type %s: %w", val.Type(), err) diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go new file mode 100644 index 000000000..220120f77 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go @@ -0,0 +1,36 @@ +package hclogutils + +// MapsToArgs will shallow merge field maps into the slice of key1, value1, +// key2, value2, ... arguments expected by hc-log.Logger methods. 
+func MapsToArgs(maps ...map[string]interface{}) []interface{} { + switch len(maps) { + case 0: + return nil + case 1: + result := make([]interface{}, 0, len(maps[0])*2) + + for k, v := range maps[0] { + result = append(result, k) + result = append(result, v) + } + + return result + default: + mergedMap := make(map[string]interface{}, 0) + + for _, m := range maps { + for k, v := range m { + mergedMap[k] = v + } + } + + result := make([]interface{}, 0, len(mergedMap)*2) + + for k, v := range mergedMap { + result = append(result, k) + result = append(result, v) + } + + return result + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go new file mode 100644 index 000000000..a0ec34e20 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go @@ -0,0 +1,29 @@ +package hclogutils + +import ( + "github.com/hashicorp/go-hclog" +) + +// LoggerOptionsCopy will safely copy LoggerOptions. Manually implemented +// to save importing a dependency such as github.com/mitchellh/copystructure. +func LoggerOptionsCopy(src *hclog.LoggerOptions) *hclog.LoggerOptions { + if src == nil { + return nil + } + + return &hclog.LoggerOptions{ + AdditionalLocationOffset: src.AdditionalLocationOffset, + Color: src.Color, + DisableTime: src.DisableTime, + Exclude: src.Exclude, + IncludeLocation: src.IncludeLocation, + IndependentLevels: src.IndependentLevels, + JSONFormat: src.JSONFormat, + Level: src.Level, + Mutex: src.Mutex, + Name: src.Name, + Output: src.Output, + TimeFormat: src.TimeFormat, + TimeFn: src.TimeFn, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go index 8c0ef20f4..983bc874d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go @@ -5,6 +5,14 @@ import ( "os" ) +const ( + // Default provider root logger name. + DefaultProviderRootLoggerName string = "provider" + + // Default SDK root logger name. + DefaultSDKRootLoggerName string = "sdk" +) + // loggerKey defines context keys for locating loggers in context.Context // it's a private type to make sure no other packages can override the key type loggerKey string @@ -14,13 +22,31 @@ const ( // logger for writing logs from within provider code. ProviderRootLoggerKey loggerKey = "provider" + // ProviderRootLoggerOptionsKey is the loggerKey that will hold the root + // logger options when the root provider logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + ProviderRootLoggerOptionsKey loggerKey = "provider-options" + // SDKRootLoggerKey is the loggerKey that will hold the root logger for // writing logs from with SDKs. SDKRootLoggerKey loggerKey = "sdk" + // SDKRootLoggerOptionsKey is the loggerKey that will hold the root + // logger options when the SDK provider logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + SDKRootLoggerOptionsKey loggerKey = "sdk-options" + // SinkKey is the loggerKey that will hold the logging sink used for // test frameworks. 
SinkKey loggerKey = "" + + // SinkOptionsKey is the loggerKey that will hold the sink + // logger options when the SDK provider logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + SinkOptionsKey loggerKey = "sink-options" ) var ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go index fe1e0762d..02891a330 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go @@ -22,6 +22,11 @@ type LoggerOpts struct { // of the logging statement or not. IncludeLocation bool + // AdditionalLocationOffset is the number of additional stack levels to + // skip when finding the file and line information for the log line. + // Defaults to 1 to account for the tflog and tfsdklog logging functions. + AdditionalLocationOffset int + // Output dictates where logs are written to. Output should only ever // be set by tflog or tfsdklog, never by SDK authors or provider // developers. Where logs get written to is complex and delicate and @@ -34,17 +39,23 @@ type LoggerOpts struct { // tfsdklog; providers and SDKs should always include the time logs // were written as part of the log. IncludeTime bool + + // IncludeRootFields indicates whether a new subsystem logger should + // copy existing fields from the root logger. This is only performed + // at the time of new subsystem creation. + IncludeRootFields bool } // ApplyLoggerOpts generates a LoggerOpts out of a list of Option -// implementations. By default, IncludeLocation is true, IncludeTime is true, -// and Output is os.Stderr. +// implementations. By default, AdditionalLocationOffset is 1, IncludeLocation +// is true, IncludeTime is true, and Output is os.Stderr. func ApplyLoggerOpts(opts ...Option) LoggerOpts { // set some defaults l := LoggerOpts{ - IncludeLocation: true, - IncludeTime: true, - Output: os.Stderr, + AdditionalLocationOffset: 1, + IncludeLocation: true, + IncludeTime: true, + Output: os.Stderr, } for _, opt := range opts { l = opt(l) @@ -52,6 +63,18 @@ func ApplyLoggerOpts(opts ...Option) LoggerOpts { return l } +// WithAdditionalLocationOffset sets the WithAdditionalLocationOffset +// configuration option, allowing implementations to fix location information +// when implementing helper functions. The default offset of 1 is automatically +// added to the provided value to account for the tflog and tfsdk logging +// functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) Option { + return func(l LoggerOpts) LoggerOpts { + l.AdditionalLocationOffset = additionalLocationOffset + 1 + return l + } +} + // WithOutput sets the Output configuration option, controlling where logs get // written to. This is mostly used for testing (to write to os.Stdout, so the // test framework can compare it against the example output) and as a helper @@ -63,6 +86,25 @@ func WithOutput(output io.Writer) Option { } } +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeRootFields = true + return l + } +} + +// WithoutLocation disables the location included with logging statements. 
It +// should only ever be used to make log output deterministic when testing +// terraform-plugin-log. +func WithoutLocation() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeLocation = false + return l + } +} + // WithoutTimestamp disables the timestamp included with logging statements. It // should only ever be used to make log output deterministic when testing // terraform-plugin-log. diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go index 897393c5d..b40a12f83 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go @@ -16,12 +16,41 @@ func GetProviderRootLogger(ctx context.Context) hclog.Logger { return logger.(hclog.Logger) } +// GetProviderRootLoggerOptions returns the root logger options used for +// creating the root provider logger. If the root logger has not been created +// or the options are not present, it will return nil. +func GetProviderRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetProviderRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(ProviderRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + // SetProviderRootLogger sets `logger` as the root logger used for writing // logs from a provider. func SetProviderRootLogger(ctx context.Context, logger hclog.Logger) context.Context { return context.WithValue(ctx, ProviderRootLoggerKey, logger) } +// SetProviderRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the provider root logger. +func SetProviderRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, ProviderRootLoggerOptionsKey, loggerOptions) +} + // NewProviderSubsystemLoggerWarning is the text included in log output when a // subsystem is auto-generated by terraform-plugin-log because it was used // before the provider instantiated it. diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go index 99c577a9d..3c6772b53 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go @@ -16,12 +16,41 @@ func GetSDKRootLogger(ctx context.Context) hclog.Logger { return logger.(hclog.Logger) } +// GetSDKRootLoggerOptions returns the root logger options used for +// creating the root SDK logger. If the root logger has not been created or +// the options are not present, it will return nil. +func GetSDKRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSDKRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SDKRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + // SetSDKRootLogger sets `logger` as the root logger used for writing logs from // an SDK. 
func SetSDKRootLogger(ctx context.Context, logger hclog.Logger) context.Context { return context.WithValue(ctx, SDKRootLoggerKey, logger) } +// SetSDKRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the SDK root logger. +func SetSDKRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SDKRootLoggerOptionsKey, loggerOptions) +} + // NewSDKSubsystemLoggerWarning is the text included in log output when a // subsystem is auto-generated by terraform-plugin-log because it was used // before the SDK instantiated it. diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go new file mode 100644 index 000000000..d48f3bf4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go @@ -0,0 +1,51 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// GetSink returns the sink logger used for writing logs. +// If no sink logger has been created, it will return nil. +func GetSink(ctx context.Context) hclog.Logger { + logger := ctx.Value(SinkKey) + if logger == nil { + return nil + } + return logger.(hclog.Logger) +} + +// GetSinkOptions returns the root logger options used for +// creating the root SDK logger. If the root logger has not been created or +// the options are not present, it will return nil. +func GetSinkOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSink(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SinkOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + +// SetSink sets `logger` as the sink logger used for writing logs. +func SetSink(ctx context.Context, logger hclog.Logger) context.Context { + return context.WithValue(ctx, SinkKey, logger) +} + +// SetSinkOptions sets `loggerOptions` as the root logger options +// used for creating the SDK root logger. +func SetSinkOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SinkOptionsKey, loggerOptions) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go index 1b61eb55f..750177812 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go @@ -12,6 +12,14 @@ import ( // to NewSubsystem prior to calling it. type Options []logging.Option +// WithAdditionalLocationOffset returns an option that allowing implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tflog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + // WithLevelFromEnv returns an option that will set the level of the logger // based on the string in an environment variable. The environment variable // checked will be `name` and `subsystems`, joined by _ and in all caps. @@ -35,6 +43,12 @@ func WithLevel(level hclog.Level) logging.Option { } } +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. 
+func WithRootFields() logging.Option { + return logging.WithRootFields() +} + // WithoutLocation returns an option that disables including the location of // the log line in the log output, which is on by default. This has no effect // when used with NewSubsystem. diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go index b05856df0..e450e3bb6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go @@ -3,6 +3,7 @@ package tflog import ( "context" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -21,10 +22,11 @@ func With(ctx context.Context, key string, value interface{}) context.Context { return logging.SetProviderRootLogger(ctx, logger.With(key, value)) } -// Trace logs `msg` at the trace level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Trace(ctx context.Context, msg string, args ...interface{}) { +// Trace logs `msg` at the trace level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -34,13 +36,14 @@ func Trace(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Trace(msg, args...) + logger.Trace(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Debug logs `msg` at the debug level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Debug(ctx context.Context, msg string, args ...interface{}) { +// Debug logs `msg` at the debug level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -50,13 +53,14 @@ func Debug(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Debug(msg, args...) + logger.Debug(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Info logs `msg` at the info level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Info(ctx context.Context, msg string, args ...interface{}) { +// Info logs `msg` at the info level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. 
+func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -66,13 +70,14 @@ func Info(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Info(msg, args...) + logger.Info(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Warn logs `msg` at the warn level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Warn(ctx context.Context, msg string, args ...interface{}) { +// Warn logs `msg` at the warn level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -82,13 +87,14 @@ func Warn(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Warn(msg, args...) + logger.Warn(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Error logs `msg` at the error level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Error(ctx context.Context, msg string, args ...interface{}) { +// Error logs `msg` at the error level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -98,5 +104,5 @@ func Error(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Error(msg, args...) + logger.Error(msg, hclogutils.MapsToArgs(additionalFields...)...) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go index fbc564e47..5cf08f22b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go @@ -4,6 +4,7 @@ import ( "context" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -17,9 +18,10 @@ import ( // at other times. // // The only Options supported for subsystems are the Options for setting the -// level of the logger. +// level and additional location offset of the logger. 
func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context { logger := logging.GetProviderRootLogger(ctx) + if logger == nil { // this essentially should never happen in production // the root logger for provider code should be injected @@ -28,11 +30,45 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti // so just making this a no-op is fine return ctx } - subLogger := logger.Named(subsystem) + + loggerOptions := logging.GetProviderRootLoggerOptions(ctx) opts := logging.ApplyLoggerOpts(options...) + + // On the off-chance that the logger options are not available, fallback + // to creating a named logger. This will preserve the root logger options, + // but cannot make changes beyond the level due to the hclog.Logger + // interface. + if loggerOptions == nil { + subLogger := logger.Named(subsystem) + + if opts.AdditionalLocationOffset != 1 { + logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options") + } + + if opts.Level != hclog.NoLevel { + subLogger.SetLevel(opts.Level) + } + + return logging.SetProviderSubsystemLogger(ctx, subsystem, subLogger) + } + + subLoggerOptions := hclogutils.LoggerOptionsCopy(loggerOptions) + subLoggerOptions.Name = subLoggerOptions.Name + "." + subsystem + + if opts.AdditionalLocationOffset != 1 { + subLoggerOptions.AdditionalLocationOffset = opts.AdditionalLocationOffset + } + if opts.Level != hclog.NoLevel { - subLogger.SetLevel(opts.Level) + subLoggerOptions.Level = opts.Level } + + subLogger := hclog.New(subLoggerOptions) + + if opts.IncludeRootFields { + subLogger = subLogger.With(logger.ImpliedArgs()...) + } + return logging.SetProviderSubsystemLogger(ctx, subsystem, subLogger) } @@ -42,6 +78,11 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti func SubsystemWith(ctx context.Context, subsystem, key string, value interface{}) context.Context { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return ctx + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } @@ -49,61 +90,96 @@ func SubsystemWith(ctx context.Context, subsystem, key string, value interface{} } // SubsystemTrace logs `msg` at the trace level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemTrace(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. 
+func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Trace(msg, args...) + logger.Trace(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemDebug logs `msg` at the debug level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemDebug(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Debug(msg, args...) + logger.Debug(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemInfo logs `msg` at the info level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemInfo(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Info(msg, args...) + logger.Info(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemWarn logs `msg` at the warn level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemWarn(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. 
Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Warn(msg, args...) + logger.Warn(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemError logs `msg` at the error level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemError(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Error(msg, args...) + logger.Error(msg, hclogutils.MapsToArgs(additionalFields...)...) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go index 2115babd7..b1ba8e51e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go @@ -13,6 +13,14 @@ import ( // them. type Options []logging.Option +// WithAdditionalLocationOffset returns an option that allowing implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tfsdklog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + // WithLogName returns an option that will set the logger name explicitly to // `name`. This has no effect when used with NewSubsystem. func WithLogName(name string) logging.Option { @@ -45,6 +53,12 @@ func WithLevel(level hclog.Level) logging.Option { } } +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() logging.Option { + return logging.WithRootFields() +} + // WithoutLocation returns an option that disables including the location of // the log line in the log output, which is on by default. This has no effect // when used with NewSubsystem. 
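The tflog hunks above replace variadic key/value arguments with optional additionalFields maps and allow subsystem loggers to inherit the root logger options and fields. The sketch below is not part of the patch and is only illustrative of the updated call pattern; the function, resource, and field names are invented. Note that MapsToArgs shallow merges the maps in order, so a key repeated across maps keeps the value from the last map passed.

package example

import (
	"context"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tflog"
)

// readThing is a hypothetical provider operation used only to illustrate the
// updated tflog API.
func readThing(ctx context.Context) {
	// Fields attached with With() are shallow merged with the per-call
	// additionalFields maps accepted by the logging functions above.
	ctx = tflog.With(ctx, "resource", "example_thing")

	tflog.Debug(ctx, "reading remote object", map[string]interface{}{
		"id":      "abc123",
		"attempt": 2,
	})

	// Subsystem loggers can now carry the root logger options forward and,
	// with WithRootFields, copy the root logger fields at creation time.
	ctx = tflog.NewSubsystem(ctx, "api",
		tflog.WithLevel(hclog.Trace),
		tflog.WithRootFields(),
	)
	tflog.SubsystemTrace(ctx, "api", "sending request", map[string]interface{}{
		"method": "GET",
	})
}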
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go index ec22d9fc6..a27b7f3b0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go @@ -4,6 +4,7 @@ import ( "context" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -12,14 +13,23 @@ import ( func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Context { opts := logging.ApplyLoggerOpts(options...) if opts.Name == "" { - opts.Name = "sdk" + opts.Name = logging.DefaultSDKRootLoggerName } - if sink := getSink(ctx); sink != nil { + if sink := logging.GetSink(ctx); sink != nil { logger := sink.Named(opts.Name) + sinkLoggerOptions := logging.GetSinkOptions(ctx) + sdkLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions) + sdkLoggerOptions.Name = opts.Name + if opts.Level != hclog.NoLevel { logger.SetLevel(opts.Level) + sdkLoggerOptions.Level = opts.Level } - return logging.SetSDKRootLogger(ctx, logger) + + ctx = logging.SetSDKRootLogger(ctx, logger) + ctx = logging.SetSDKRootLoggerOptions(ctx, sdkLoggerOptions) + + return ctx } if opts.Level == hclog.NoLevel { opts.Level = hclog.Trace @@ -32,9 +42,13 @@ func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Co IncludeLocation: opts.IncludeLocation, DisableTime: !opts.IncludeTime, Output: opts.Output, - AdditionalLocationOffset: 1, + AdditionalLocationOffset: opts.AdditionalLocationOffset, } - return logging.SetSDKRootLogger(ctx, hclog.New(loggerOptions)) + + ctx = logging.SetSDKRootLogger(ctx, hclog.New(loggerOptions)) + ctx = logging.SetSDKRootLoggerOptions(ctx, loggerOptions) + + return ctx } // NewRootProviderLogger returns a new context.Context that contains a provider @@ -42,14 +56,23 @@ func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Co func NewRootProviderLogger(ctx context.Context, options ...logging.Option) context.Context { opts := logging.ApplyLoggerOpts(options...) 
if opts.Name == "" { - opts.Name = "provider" + opts.Name = logging.DefaultProviderRootLoggerName } - if sink := getSink(ctx); sink != nil { + if sink := logging.GetSink(ctx); sink != nil { logger := sink.Named(opts.Name) + sinkLoggerOptions := logging.GetSinkOptions(ctx) + providerLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions) + providerLoggerOptions.Name = opts.Name + if opts.Level != hclog.NoLevel { logger.SetLevel(opts.Level) + providerLoggerOptions.Level = opts.Level } - return logging.SetProviderRootLogger(ctx, logger) + + ctx = logging.SetProviderRootLogger(ctx, logger) + ctx = logging.SetProviderRootLoggerOptions(ctx, providerLoggerOptions) + + return ctx } if opts.Level == hclog.NoLevel { opts.Level = hclog.Trace @@ -62,9 +85,13 @@ func NewRootProviderLogger(ctx context.Context, options ...logging.Option) conte IncludeLocation: opts.IncludeLocation, DisableTime: !opts.IncludeTime, Output: opts.Output, - AdditionalLocationOffset: 1, + AdditionalLocationOffset: opts.AdditionalLocationOffset, } - return logging.SetProviderRootLogger(ctx, hclog.New(loggerOptions)) + + ctx = logging.SetProviderRootLogger(ctx, hclog.New(loggerOptions)) + ctx = logging.SetProviderRootLoggerOptions(ctx, loggerOptions) + + return ctx } // With returns a new context.Context that has a modified logger in it which @@ -81,10 +108,11 @@ func With(ctx context.Context, key string, value interface{}) context.Context { return logging.SetSDKRootLogger(ctx, logger.With(key, value)) } -// Trace logs `msg` at the trace level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Trace(ctx context.Context, msg string, args ...interface{}) { +// Trace logs `msg` at the trace level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -93,13 +121,14 @@ func Trace(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Trace(msg, args...) + logger.Trace(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Debug logs `msg` at the debug level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Debug(ctx context.Context, msg string, args ...interface{}) { +// Debug logs `msg` at the debug level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -108,13 +137,14 @@ func Debug(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Debug(msg, args...) + logger.Debug(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Info logs `msg` at the info level to the logger in `ctx`, with `args` as -// structured arguments in the log output. 
`args` is expected to be pairs of -// key and value. -func Info(ctx context.Context, msg string, args ...interface{}) { +// Info logs `msg` at the info level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -123,13 +153,14 @@ func Info(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Info(msg, args...) + logger.Info(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Warn logs `msg` at the warn level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Warn(ctx context.Context, msg string, args ...interface{}) { +// Warn logs `msg` at the warn level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -138,13 +169,14 @@ func Warn(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Warn(msg, args...) + logger.Warn(msg, hclogutils.MapsToArgs(additionalFields...)...) } -// Error logs `msg` at the error level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Error(ctx context.Context, msg string, args ...interface{}) { +// Error logs `msg` at the error level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `With()` function, +// and across multiple maps. +func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -153,5 +185,5 @@ func Error(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Error(msg, args...) + logger.Error(msg, hclogutils.MapsToArgs(additionalFields...)...) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go index 0b26a0890..6326901f1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go @@ -6,6 +6,7 @@ import ( "io" "os" "strings" + "sync" "syscall" "github.com/hashicorp/go-hclog" @@ -53,13 +54,8 @@ const ( // loggers. var ValidLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"} -func getSink(ctx context.Context) hclog.Logger { - logger := ctx.Value(logging.SinkKey) - if logger == nil { - return nil - } - return logger.(hclog.Logger) -} +// Only show invalid log level message once across any number of level lookups. 
+var invalidLogLevelMessage sync.Once // RegisterTestSink sets up a logging sink, for use with test frameworks and // other cases where plugin logs don't get routed through Terraform. This @@ -71,10 +67,15 @@ func getSink(ctx context.Context) hclog.Logger { // RegisterTestSink must be called prior to any loggers being setup or // instantiated. func RegisterTestSink(ctx context.Context, t testing.T) context.Context { - return context.WithValue(ctx, logging.SinkKey, newSink(t)) + logger, loggerOptions := newSink(t) + + ctx = logging.SetSink(ctx, logger) + ctx = logging.SetSinkOptions(ctx, loggerOptions) + + return ctx } -func newSink(t testing.T) hclog.Logger { +func newSink(t testing.T) (hclog.Logger, *hclog.LoggerOptions) { logOutput := io.Writer(os.Stderr) var json bool var logLevel hclog.Level @@ -120,21 +121,29 @@ func newSink(t testing.T) hclog.Logger { } else if isValidLogLevel(envLevel) { logLevel = hclog.LevelFromString(envLevel) } else { - fmt.Fprintf(os.Stderr, "[WARN] Invalid log level: %q. Defaulting to level: OFF. Valid levels are: %+v", - envLevel, ValidLevels) + invalidLogLevelMessage.Do(func() { + fmt.Fprintf( + os.Stderr, + "[WARN] Invalid log level: %q. Defaulting to level: OFF. Valid levels are: %+v\n", + envLevel, + ValidLevels, + ) + }) } - return hclog.New(&hclog.LoggerOptions{ + loggerOptions := &hclog.LoggerOptions{ Level: logLevel, Output: logOutput, IndependentLevels: true, JSONFormat: json, - }) + } + + return hclog.New(loggerOptions), loggerOptions } func isValidLogLevel(level string) bool { - for _, l := range ValidLevels { - if level == string(l) { + for _, validLevel := range ValidLevels { + if level == validLevel { return true } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go index 96b76791f..3445288bc 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go @@ -4,6 +4,7 @@ import ( "context" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -17,9 +18,10 @@ import ( // at other times. // // The only Options supported for subsystems are the Options for setting the -// level of the logger. +// level and additional location offset of the logger. func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context { logger := logging.GetSDKRootLogger(ctx) + if logger == nil { // this essentially should never happen in production // the root logger for provider code should be injected @@ -28,11 +30,45 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti // so just making this a no-op is fine return ctx } - subLogger := logger.Named(subsystem) + + loggerOptions := logging.GetSDKRootLoggerOptions(ctx) opts := logging.ApplyLoggerOpts(options...) + + // On the off-chance that the logger options are not available, fallback + // to creating a named logger. This will preserve the root logger options, + // but cannot make changes beyond the level due to the hclog.Logger + // interface. 
+ if loggerOptions == nil { + subLogger := logger.Named(subsystem) + + if opts.AdditionalLocationOffset != 1 { + logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options") + } + + if opts.Level != hclog.NoLevel { + subLogger.SetLevel(opts.Level) + } + + return logging.SetSDKSubsystemLogger(ctx, subsystem, subLogger) + } + + subLoggerOptions := hclogutils.LoggerOptionsCopy(loggerOptions) + subLoggerOptions.Name = subLoggerOptions.Name + "." + subsystem + + if opts.AdditionalLocationOffset != 1 { + subLoggerOptions.AdditionalLocationOffset = opts.AdditionalLocationOffset + } + if opts.Level != hclog.NoLevel { - subLogger.SetLevel(opts.Level) + subLoggerOptions.Level = opts.Level } + + subLogger := hclog.New(subLoggerOptions) + + if opts.IncludeRootFields { + subLogger = subLogger.With(logger.ImpliedArgs()...) + } + return logging.SetSDKSubsystemLogger(ctx, subsystem, subLogger) } @@ -42,6 +78,11 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti func SubsystemWith(ctx context.Context, subsystem, key string, value interface{}) context.Context { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return ctx + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } @@ -49,61 +90,96 @@ func SubsystemWith(ctx context.Context, subsystem, key string, value interface{} } // SubsystemTrace logs `msg` at the trace level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemTrace(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Trace(msg, args...) + logger.Trace(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemDebug logs `msg` at the debug level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemDebug(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. 
+func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Debug(msg, args...) + logger.Debug(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemInfo logs `msg` at the info level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemInfo(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Info(msg, args...) + logger.Info(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemWarn logs `msg` at the warn level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemWarn(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Warn(msg, args...) + logger.Warn(msg, hclogutils.MapsToArgs(additionalFields...)...) } // SubsystemError logs `msg` at the error level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemError(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. 
by the `SubsystemWith()` function, and across +// multiple maps. +func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Error(msg, args...) + logger.Error(msg, hclogutils.MapsToArgs(additionalFields...)...) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go index 12fdde9d5..263a1ff57 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go @@ -120,8 +120,8 @@ func RandIpAddress(s string) (string, error) { last.SetBytes([]byte(lastIp)) r := &big.Int{} r.Sub(last, first) - if len := r.BitLen(); len > 31 { - return "", fmt.Errorf("CIDR range is too large: %d", len) + if bitLen := r.BitLen(); bitLen > 31 { + return "", fmt.Errorf("CIDR range is too large: %d", bitLen) } max := int(r.Int64()) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go index 01ae25dfb..235586aed 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go @@ -1,7 +1,6 @@ package logging import ( - "context" "fmt" "io" "io/ioutil" @@ -11,7 +10,6 @@ import ( "syscall" "github.com/hashicorp/logutils" - "github.com/hashicorp/terraform-plugin-log/tfsdklog" testing "github.com/mitchellh/go-testing-interface" ) @@ -125,7 +123,7 @@ func LogLevel() string { // IsDebugOrHigher returns whether or not the current log level is debug or trace func IsDebugOrHigher() bool { - level := string(LogLevel()) + level := LogLevel() return level == "DEBUG" || level == "TRACE" } @@ -138,12 +136,3 @@ func isValidLogLevel(level string) bool { return false } - -// GetTestLogContext creates a context that is registered to the SDK log sink. -// This function is for internal usage only and is not supported by the project's -// compatibility promises. 
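A rough illustration of the additionalFields handling used by the Subsystem* functions above: maps are shallow-merged (later maps win) and flattened into hclog's alternating key/value arguments. mapsToArgs is a stand-in for the internal hclogutils.MapsToArgs helper, not that helper itself.

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

// mapsToArgs shallow-merges the given maps and flattens the result into the
// key/value argument slice that hclog expects.
func mapsToArgs(maps ...map[string]interface{}) []interface{} {
	merged := map[string]interface{}{}

	for _, m := range maps {
		for k, v := range m {
			merged[k] = v
		}
	}

	args := make([]interface{}, 0, len(merged)*2)

	for k, v := range merged {
		args = append(args, k, v)
	}

	return args
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "provider.example", Level: hclog.Debug, Output: os.Stderr})

	logger.Debug("making API request", mapsToArgs(
		map[string]interface{}{"endpoint": "/things"},
		map[string]interface{}{"retries": 3},
	)...)
}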
-func GetTestLogContext(t testing.T) context.Context { - ctx := context.Background() - ctx = tfsdklog.RegisterTestSink(ctx, t) - return ctx -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go index bddabe647..6419605e7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go @@ -52,7 +52,7 @@ func prettyPrintJsonLines(b []byte) string { for i, p := range parts { if b := []byte(p); json.Valid(b) { var out bytes.Buffer - json.Indent(&out, b, "", " ") + _ = json.Indent(&out, b, "", " ") // already checked for validity parts[i] = out.String() } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go new file mode 100644 index 000000000..fed6aa25c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go @@ -0,0 +1,32 @@ +package resource + +// Environment variables for acceptance testing. Additional environment +// variable constants can be found in the internal/plugintest package. +const ( + // Environment variable to enable acceptance tests using this package's + // ParallelTest and Test functions whose TestCase does not enable the + // IsUnitTest field. Defaults to disabled, in which each test will call + // (*testing.T).Skip(). Can be set to any value to enable acceptance tests, + // however "1" is conventional. + EnvTfAcc = "TF_ACC" + + // Environment variable with hostname for the provider under acceptance + // test. The hostname is the first portion of the full provider source + // address, such as "example.com" in example.com/myorg/myprovider. Defaults + // to "registry.terraform.io". + // + // Only required if any Terraform configuration set via the TestStep + // type Config field includes a provider source, such as the terraform + // configuration block required_providers attribute. + EnvTfAccProviderHost = "TF_ACC_PROVIDER_HOST" + + // Environment variable with namespace for the provider under acceptance + // test. The namespace is the second portion of the full provider source + // address, such as "myorg" in registry.terraform.io/myorg/myprovider. + // Defaults to "-" for Terraform 0.12-0.13 compatibility and "hashicorp". + // + // Only required if any Terraform configuration set via the TestStep + // type Config field includes a provider source, such as the terraform + // configuration block required_providers attribute. 
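A standalone sketch of the prettyPrintJsonLines approach shown above: only lines that pass json.Valid are indented, so the json.Indent error can be safely discarded. prettyPrintLines is an illustrative stand-in.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

// prettyPrintLines indents any line that is valid JSON and leaves the rest
// of the payload untouched.
func prettyPrintLines(b []byte) string {
	parts := strings.Split(string(b), "\n")

	for i, p := range parts {
		if raw := []byte(p); json.Valid(raw) {
			var out bytes.Buffer
			_ = json.Indent(&out, raw, "", " ") // validity already checked
			parts[i] = out.String()
		}
	}

	return strings.Join(parts, "\n")
}

func main() {
	fmt.Println(prettyPrintLines([]byte("HTTP/1.1 200 OK\n{\"id\":\"example\",\"tags\":{\"Name\":\"test\"}}")))
}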
+ EnvTfAccProviderNamespace = "TF_ACC_PROVIDER_NAMESPACE" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go index 2d649def0..9e52348d6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io/ioutil" - "log" "os" "strings" "sync" @@ -13,29 +12,120 @@ import ( "github.com/hashicorp/terraform-exec/tfexec" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" testing "github.com/mitchellh/go-testing-interface" ) +// protov5ProviderFactory is a function which is called to start a protocol +// version 5 provider server. +type protov5ProviderFactory func() (tfprotov5.ProviderServer, error) + +// protov5ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 5 provider servers. +type protov5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. +func (pf protov5ProviderFactories) merge(otherPfs ...protov5ProviderFactories) protov5ProviderFactories { + result := make(protov5ProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + +// protov6ProviderFactory is a function which is called to start a protocol +// version 6 provider server. +type protov6ProviderFactory func() (tfprotov6.ProviderServer, error) + +// protov6ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 6 provider servers. +type protov6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. +func (pf protov6ProviderFactories) merge(otherPfs ...protov6ProviderFactories) protov6ProviderFactories { + result := make(protov6ProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + +// sdkProviderFactory is a function which is called to start a SDK provider +// server. +type sdkProviderFactory func() (*schema.Provider, error) + +// protov6ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 6 provider servers. +type sdkProviderFactories map[string]func() (*schema.Provider, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. 
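A standalone sketch of the merge semantics shared by the provider factory maps introduced above, where a later entry overwrites an earlier one. The factories type and its string-returning functions are illustrative, not the unexported types from this file.

package main

import "fmt"

// factories maps provider names to factory functions; merge demonstrates the
// "later entry wins" behavior of the provider factory merge helpers.
type factories map[string]func() (string, error)

func (f factories) merge(others ...factories) factories {
	result := make(factories)

	for name, fn := range f {
		result[name] = fn
	}

	for _, other := range others {
		for name, fn := range other {
			result[name] = fn
		}
	}

	return result
}

func main() {
	base := factories{"example": func() (string, error) { return "case-level", nil }}
	step := factories{"example": func() (string, error) { return "step-level", nil }}

	merged := base.merge(step)
	v, _ := merged["example"]()
	fmt.Println(v) // step-level: the later entry overwrote the earlier one
}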
+func (pf sdkProviderFactories) merge(otherPfs ...sdkProviderFactories) sdkProviderFactories { + result := make(sdkProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + type providerFactories struct { - legacy map[string]func() (*schema.Provider, error) - protov5 map[string]func() (tfprotov5.ProviderServer, error) - protov6 map[string]func() (tfprotov6.ProviderServer, error) + legacy sdkProviderFactories + protov5 protov5ProviderFactories + protov6 protov6ProviderFactories } -func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, factories providerFactories) error { +func runProviderCommand(ctx context.Context, t testing.T, f func() error, wd *plugintest.WorkingDir, factories *providerFactories) error { // don't point to this as a test failure location // point to whatever called it t.Helper() + // This should not happen, but prevent panics just in case. + if factories == nil { + err := fmt.Errorf("Provider factories are missing to run Terraform command. Please report this bug in the testing framework.") + logging.HelperResourceError(ctx, err.Error()) + return err + } + // Run the providers in the same process as the test runner using the // reattach behavior in Terraform. This ensures we get test coverage // and enables the use of delve as a debugger. - ctx, cancel := context.WithCancel(logging.GetTestLogContext(t)) + ctx, cancel := context.WithCancel(ctx) defer cancel() // this is needed so Terraform doesn't default to expecting protocol 4; @@ -43,8 +133,13 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // plugins. os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") - // Terraform doesn't need to reach out to Checkpoint during testing. - wd.Setenv("CHECKPOINT_DISABLE", "1") + // Acceptance testing does not need to call checkpoint as the output + // is not accessible, nor desirable if explicitly using + // TF_ACC_TERRAFORM_PATH or TF_ACC_TERRAFORM_VERSION environment variables. + // + // Avoid calling (tfexec.Terraform).SetEnv() as it will stop copying + // os.Environ() and prevents TF_VAR_ environment variable usage. + os.Setenv("CHECKPOINT_DISABLE", "1") // Terraform 0.12.X and 0.13.X+ treat namespaceless providers // differently in terms of what namespace they default to. So we're @@ -53,12 +148,12 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // the host or namespace using environment variables. var namespaces []string host := "registry.terraform.io" - if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + if v := os.Getenv(EnvTfAccProviderNamespace); v != "" { namespaces = append(namespaces, v) } else { namespaces = append(namespaces, "-", "hashicorp") } - if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + if v := os.Getenv(EnvTfAccProviderHost); v != "" { host = v } @@ -70,12 +165,17 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. 
providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) + + logging.HelperResourceDebug(ctx, "Creating sdkv2 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created sdkv2 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -95,14 +195,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting sdkv2 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + + logging.HelperResourceDebug(ctx, "Started sdkv2 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -137,6 +241,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) // If the user has supplied the same provider in both // ProviderFactories and ProtoV5ProviderFactories, they made a @@ -150,11 +255,15 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, } } + logging.HelperResourceDebug(ctx, "Creating tfprotov5 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created tfprotov5 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -174,14 +283,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting tfprotov5 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + + logging.HelperResourceDebug(ctx, "Started tfprotov5 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -217,6 +330,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. 
providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) // If the user has already registered this provider in // ProviderFactories or ProtoV5ProviderFactories, they made a @@ -230,11 +344,15 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, } } + logging.HelperResourceDebug(ctx, "Creating tfprotov6 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created tfprotov6 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -250,15 +368,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting tfprotov6 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Started tfprotov6 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -290,26 +411,35 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // set the working directory reattach info that will tell Terraform how to // connect to our various running servers. - wd.SetReattachInfo(reattachInfo) + wd.SetReattachInfo(ctx, reattachInfo) + + logging.HelperResourceTrace(ctx, "Calling wrapped Terraform CLI command") // ok, let's call whatever Terraform command the test was trying to // call, now that we know it'll attach back to those servers we just // started. err := f() if err != nil { - log.Printf("[WARN] Got error running Terraform: %s", err) + logging.HelperResourceWarn(ctx, "Error running Terraform CLI command", map[string]interface{}{logging.KeyError: err}) } + logging.HelperResourceTrace(ctx, "Called wrapped Terraform CLI command") + logging.HelperResourceDebug(ctx, "Stopping providers") + // cancel the servers so they'll return. Otherwise, this closeCh won't // get closed, and we'll hang here. cancel() + logging.HelperResourceTrace(ctx, "Waiting for providers to stop") + // wait for the servers to actually shut down; it may take a moment for // them to clean up, or whatever. // TODO: add a timeout here? // PC: do we need one? The test will time out automatically... wg.Wait() + logging.HelperResourceTrace(ctx, "Providers have successfully stopped") + // once we've run the Terraform command, let's remove the reattach // information from the WorkingDir's environment. 
The WorkingDir will // persist until the next call, but the server in the reattach info @@ -327,10 +457,10 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, func getProviderAddr(name string) string { host := "registry.terraform.io" namespace := "hashicorp" - if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + if v := os.Getenv(EnvTfAccProviderNamespace); v != "" { namespace = v } - if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + if v := os.Getenv(EnvTfAccProviderHost); v != "" { host = v } return strings.TrimSuffix(host, "/") + "/" + diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go index 7f8c22529..497bcc1c9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -226,7 +226,7 @@ func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error err := sf.AddEntry(k, value) if err != nil { - return err + return fmt.Errorf("unable to add map key %q entry: %w", k, err) } } @@ -235,7 +235,9 @@ func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error mapLength = fmt.Sprintf("%s.%s", prefix, "%") } - sf.AddEntry(mapLength, strconv.Itoa(len(m))) + if err := sf.AddEntry(mapLength, strconv.Itoa(len(m))); err != nil { + return fmt.Errorf("unable to add map length %q entry: %w", mapLength, err) + } return nil } @@ -245,12 +247,14 @@ func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { key := fmt.Sprintf("%s.%d", name, i) err := sf.AddEntry(key, elem) if err != nil { - return err + return fmt.Errorf("unable to add slice key %q entry: %w", key, err) } } sliceLength := fmt.Sprintf("%s.#", name) - sf.AddEntry(sliceLength, strconv.Itoa(len(elements))) + if err := sf.AddEntry(sliceLength, strconv.Itoa(len(elements))); err != nil { + return fmt.Errorf("unable to add slice length %q entry: %w", sliceLength, err) + } return nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go new file mode 100644 index 000000000..c09e4657c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go @@ -0,0 +1,56 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (c TestCase) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + // [BF] The Providers field handling predates the logic being moved to this + // method. It's not entirely clear to me at this time why this field + // is being used and not the others, but leaving it here just in case + // it does have a special purpose that wasn't being unit tested prior. 
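A standalone sketch of the flatmap behavior behind shimmedFlatmap.AddMap: each map key becomes "prefix.key" and the element count is recorded under "prefix.%". addMap is an illustrative stand-in, not the actual shim.

package main

import (
	"fmt"
	"strconv"
)

// addMap writes each map element as "prefix.key" and records the element
// count under "prefix.%", returning a wrapped error on duplicate entries.
func addMap(flat map[string]string, prefix string, m map[string]interface{}) error {
	for k, v := range m {
		key := fmt.Sprintf("%s.%s", prefix, k)

		if _, exists := flat[key]; exists {
			return fmt.Errorf("unable to add map key %q entry: already exists", key)
		}

		flat[key] = fmt.Sprintf("%v", v)
	}

	flat[prefix+".%"] = strconv.Itoa(len(m))

	return nil
}

func main() {
	flat := map[string]string{}
	_ = addMap(flat, "tags", map[string]interface{}{"Name": "example"})
	fmt.Println(flat) // map[tags.%:1 tags.Name:example]
}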
+ for name := range c.Providers { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + for name, externalProvider := range c.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go new file mode 100644 index 000000000..39e5da46c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go @@ -0,0 +1,85 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// hasProviders returns true if the TestCase has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, +// ProviderFactories, or Providers fields. +func (c TestCase) hasProviders(_ context.Context) bool { + if len(c.ExternalProviders) > 0 { + return true + } + + if len(c.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(c.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(c.ProviderFactories) > 0 { + return true + } + + if len(c.Providers) > 0 { + return true + } + + return false +} + +// validate ensures the TestCase is valid based on the following criteria: +// +// - No overlapping ExternalProviders and Providers entries +// - No overlapping ExternalProviders and ProviderFactories entries +// - TestStep validations performed by the (TestStep).validate() method. 
+// +func (c TestCase) validate(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Validating TestCase") + + if len(c.Steps) == 0 { + err := fmt.Errorf("TestCase missing Steps") + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range c.ExternalProviders { + if _, ok := c.Providers[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and Providers", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if _, ok := c.ProviderFactories[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + testCaseHasProviders := c.hasProviders(ctx) + + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // Use 1-based index for humans + stepValidateReq := testStepValidateRequest{ + StepNumber: stepNumber, + TestCaseHasProviders: testCaseHasProviders, + } + + err := step.validate(ctx, stepValidateReq) + + if err != nil { + err := fmt.Errorf("TestStep %d/%d validation error: %w", stepNumber, len(c.Steps), err) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go index 6d82d8017..8047a2cdc 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -1,6 +1,7 @@ package resource import ( + "context" "errors" "flag" "fmt" @@ -12,13 +13,14 @@ import ( "time" "github.com/hashicorp/go-multierror" - testing "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -48,7 +50,7 @@ var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") var sweeperFuncs map[string]*Sweeper -// type SweeperFunc is a signature for a function that acts as a sweeper. It +// SweeperFunc is a signature for a function that acts as a sweeper. It // accepts a string for the region that the sweeper is to be ran in. This // function must be able to construct a valid client for that region. type SweeperFunc func(r string) error @@ -84,6 +86,27 @@ func AddTestSweepers(name string, s *Sweeper) { sweeperFuncs[name] = s } +// TestMain adds sweeper functionality to the "go test" command, otherwise +// tests are executed as normal. Most provider acceptance tests are written +// using the Test() function of this package, which imposes its own +// requirements and Terraform CLI behavior. 
Refer to that function's +// documentation for additional details. +// +// Sweepers enable infrastructure cleanup functions to be included with +// resource definitions, typically so developers can remove all resources of +// that resource type from testing infrastructure in case of failures that +// prevented the normal resource destruction behavior of acceptance tests. +// Use the AddTestSweepers() function to configure available sweepers. +// +// Sweeper flags added to the "go test" command: +// +// -sweep: Comma-separated list of locations/regions to run available sweepers. +// -sweep-allow-failues: Enable to allow other sweepers to run after failures. +// -sweep-run: Comma-separated list of resource type sweepers to run. Defaults +// to all sweepers. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. func TestMain(m interface { Run() int }) { @@ -126,7 +149,7 @@ func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures b return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) } } - elapsed := time.Now().Sub(start) + elapsed := time.Since(start) log.Printf("Completed Sweepers for region (%s) in %s", region, elapsed) log.Printf("Sweeper Tests for region (%s) ran successfully:\n", region) @@ -239,7 +262,7 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe start := time.Now() runE := s.F(region) - elapsed := time.Now().Sub(start) + elapsed := time.Since(start) log.Printf("[DEBUG] Completed Sweeper (%s) in region (%s) in %s", s.Name, region, elapsed) @@ -252,7 +275,8 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe return runE } -const TestEnvVar = "TF_ACC" +// Deprecated: Use EnvTfAcc instead. +const TestEnvVar = EnvTfAcc // TestCheckFunc is the callback type used with acceptance tests to check // the state of a resource. The state passed in is the latest state known, @@ -275,6 +299,9 @@ type ErrorCheckFunc func(error) error // // When the destroy plan is executed, the config from the last TestStep // is used to plan it. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. type TestCase struct { // IsUnitTest allows a test to run regardless of the TF_ACC // environment variable. This should be used with care - only for @@ -291,6 +318,11 @@ type TestCase struct { // ProviderFactories can be specified for the providers that are valid. // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + // // These are the providers that can be referenced within the test. Each key // is an individually addressable provider. Typically you will only pass a // single value here for the provider you are testing. Aliases are not @@ -312,6 +344,11 @@ type TestCase struct { // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, // but for protocol v5 providers defined using the terraform-plugin-go // ProviderServer interface. 
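A minimal sketch of the TestMain and AddTestSweepers usage described above; the "example_thing" sweeper name and its cleanup logic are placeholders for a real provider's resources.

package example_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func init() {
	// Register a sweeper for a hypothetical resource type so it can be run
	// via: go test ./... -sweep=us-west-2 -sweep-run=example_thing
	resource.AddTestSweepers("example_thing", &resource.Sweeper{
		Name: "example_thing",
		F: func(region string) error {
			// List and delete any leftover acceptance test infrastructure
			// in the given region here.
			return nil
		},
	})
}

// TestMain wires the sweeper flags into "go test"; regular tests in the
// package still run as normal.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}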
+ // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, @@ -319,6 +356,11 @@ type TestCase struct { // ProviderServer interface. // The version of Terraform used in acceptance testing must be greater // than or equal to v0.15.4 to use ProtoV6ProviderFactories. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) // Providers is the ResourceProvider that will be under test. @@ -327,11 +369,18 @@ type TestCase struct { Providers map[string]*schema.Provider // ExternalProviders are providers the TestCase relies on that should - // be downloaded from the registry during init. This is only really - // necessary to set if you're using import, as providers in your config - // will be automatically retrieved during init. Import doesn't use a - // config, however, so we allow manually specifying them here to be - // downloaded for import tests. + // be downloaded from the registry during init. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + // + // This is generally unnecessary to set at the TestCase level, however + // it has existing in the testing framework prior to the introduction of + // TestStep level specification and was only necessary for performing + // import testing where the configuration contained a provider outside the + // one under test. ExternalProviders map[string]ExternalProvider // PreventPostDestroyRefresh can be set to true for cases where data sources @@ -350,16 +399,18 @@ type TestCase struct { // same state. Each step can have its own check to verify correctness. Steps []TestStep - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. This will - // default to the first non-nil primary resource in the state. + // IDRefreshName is the name of the resource to check during ID-only + // refresh testing, which ensures that a resource can be refreshed solely + // by its identifier. This will default to the first non-nil primary + // resource in the state. It runs every TestStep. // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string + // While not deprecated, most resource tests should instead prefer using + // TestStep.ImportState based testing as it works with multiple attribute + // identifiers and also verifies resource import functionality. 
+ IDRefreshName string + + // IDRefreshIgnore is a list of configuration keys that will be ignored + // during ID-only refresh testing. IDRefreshIgnore []string } @@ -376,6 +427,9 @@ type ExternalProvider struct { // Multiple TestSteps can be sequenced in a Test to allow testing // potentially complex update logic. In general, simply create/destroy // tests will only need one step. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. type TestStep struct { // ResourceName should be set to the name of the resource // that is being tested. Example: "aws_instance.foo". Various test @@ -414,6 +468,9 @@ type TestStep struct { // Config a string of the configuration to give to Terraform. If this // is set, then the TestCase will execute this step with the same logic // as a `terraform apply`. + // + // JSON Configuration Syntax can be used and is assumed whenever Config + // contains valid JSON. Config string // Check is called after the Config is applied. Use this step to @@ -452,8 +509,15 @@ type TestStep struct { // are tested alongside real resources PreventPostDestroyRefresh bool - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks + // SkipFunc enables skipping the TestStep, based on environment criteria. + // For example, this can prevent running certain steps that may be runtime + // platform or API configuration dependent. + // + // Return true with no error to skip the test step. The error return + // should be used to signify issues that prevented the function from + // completing as expected. + // + // SkipFunc is called after PreConfig but before applying the Config. SkipFunc func() (bool, error) //--------------------------------------------------------------- @@ -498,14 +562,84 @@ type TestStep struct { // fields that can't be refreshed and don't matter. ImportStateVerify bool ImportStateVerifyIgnore []string + + // ProviderFactories can be specified for the providers that are valid for + // this TestStep. When providers are specified at the TestStep level, all + // TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v5 providers defined using the terraform-plugin-go + // ProviderServer interface. When providers are specified at the TestStep + // level, all TestStep within a TestCase must declare providers. 
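A minimal sketch of a TestCase that declares its provider once at the TestCase level via ProviderFactories, as described above. The "example" provider name, resource type, and checked attribute are placeholders, and exampleProvider stands in for the real provider under test.

package example_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleProvider is a stand-in for the provider under test; a real provider
// would define resources and data sources.
func exampleProvider() *schema.Provider {
	return &schema.Provider{}
}

func TestAccExampleThing_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProviderFactories: map[string]func() (*schema.Provider, error){
			"example": func() (*schema.Provider, error) {
				return exampleProvider(), nil
			},
		},
		Steps: []resource.TestStep{
			{
				Config: `resource "example_thing" "test" {}`,
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttrSet("example_thing.test", "id"),
				),
			},
		},
	})
}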
+ // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + + // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v6 providers defined using the terraform-plugin-go + // ProviderServer interface. + // The version of Terraform used in acceptance testing must be greater + // than or equal to v0.15.4 to use ProtoV6ProviderFactories. When providers + // are specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + + // ExternalProviders are providers the TestStep relies on that should + // be downloaded from the registry during init. When providers are + // specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // Outside specifying an earlier version of the provider under test, + // typically for state upgrader testing, this is generally only necessary + // for performing import testing where the prior TestStep configuration + // contained a provider outside the one under test. + ExternalProviders map[string]ExternalProvider } // ParallelTest performs an acceptance test on a resource, allowing concurrency -// with other ParallelTest. +// with other ParallelTest. The number of concurrent tests is controlled by the +// "go test" command -parallel flag. // // Tests will fail if they do not properly handle conditions to allow multiple // tests to occur against the same resource or service (e.g. random naming). -// All other requirements of the Test function also apply to this function. +// +// Test() function requirements and documentation also apply to this function. func ParallelTest(t testing.T, c TestCase) { t.Helper() t.Parallel() @@ -522,98 +656,100 @@ func ParallelTest(t testing.T, c TestCase) { // the "-test.v" flag) is set. Because some acceptance tests take quite // long, we require the verbose flag so users are able to see progress // output. +// +// Use the ParallelTest() function to automatically set (*testing.T).Parallel() +// to enable testing concurrency. Use the UnitTest() function to automatically +// set the TestCase type IsUnitTest field. +// +// This function will automatically find or install Terraform CLI into a +// temporary directory, based on the following behavior: +// +// - If the TF_ACC_TERRAFORM_PATH environment variable is set, that +// Terraform CLI binary is used if found and executable. If not found or +// executable, an error will be returned unless the +// TF_ACC_TERRAFORM_VERSION environment variable is also set. +// - If the TF_ACC_TERRAFORM_VERSION environment variable is set, install +// and use that Terraform CLI version. 
+// - If both the TF_ACC_TERRAFORM_PATH and TF_ACC_TERRAFORM_VERSION +// environment variables are unset, perform a lookup for the Terraform +// CLI binary based on the operating system PATH. If not found, the +// latest available Terraform CLI binary is installed. +// +// Refer to the Env prefixed constants for additional details about these +// environment variables, and others, that control testing functionality. func Test(t testing.T, c TestCase) { t.Helper() + ctx := context.Background() + ctx = logging.InitTestContext(ctx, t) + + err := c.validate(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Test validation error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Test validation error: %s", err) + } + // We only run acceptance tests if an env var is set because they're // slow and generally require some outside configuration. You can opt out // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + if os.Getenv(EnvTfAcc) == "" && !c.IsUnitTest { t.Skip(fmt.Sprintf( "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) + EnvTfAcc)) return } - logging.SetOutput(t) - // Copy any explicitly passed providers to factories, this is for backwards compatibility. if len(c.Providers) > 0 { c.ProviderFactories = map[string]func() (*schema.Provider, error){} for name, p := range c.Providers { - if _, ok := c.ProviderFactories[name]; ok { - t.Fatalf("ProviderFactory for %q already exists, cannot overwrite with Provider", name) - } prov := p - c.ProviderFactories[name] = func() (*schema.Provider, error) { + c.ProviderFactories[name] = func() (*schema.Provider, error) { //nolint:unparam // required signature return prov, nil } } } + logging.HelperResourceDebug(ctx, "Starting TestCase") + // Run the PreCheck if we have it. // This is done after the auto-configure to allow providers // to override the default auto-configure parameters. if c.PreCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase PreCheck") + c.PreCheck() + + logging.HelperResourceDebug(ctx, "Called TestCase PreCheck") } sourceDir, err := os.Getwd() if err != nil { t.Fatalf("Error getting working dir: %s", err) } - helper := plugintest.AutoInitProviderHelper(sourceDir) + helper := plugintest.AutoInitProviderHelper(ctx, sourceDir) defer func(helper *plugintest.Helper) { err := helper.Close() if err != nil { - log.Printf("Error cleaning up temporary test files: %s", err) + logging.HelperResourceError(ctx, "Unable to clean up temporary test files", map[string]interface{}{logging.KeyError: err}) } }(helper) - runNewTest(t, c, helper) -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) (string, error) { - var lines []string - var requiredProviders []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - for p, v := range c.ExternalProviders { - if _, ok := c.Providers[p]; ok { - return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p) - } - if _, ok := c.ProviderFactories[p]; ok { - return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. 
Must be set in only one.", p) - } - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - var providerBlock string - if v.VersionConstraint != "" { - providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint) - } - if v.Source != "" { - providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source) - } - if providerBlock != "" { - providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock) - } - requiredProviders = append(requiredProviders, providerBlock) - } - - if len(requiredProviders) > 0 { - lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...) - } + runNewTest(ctx, t, c, helper) - return strings.Join(lines, ""), nil + logging.HelperResourceDebug(ctx, "Finished TestCase") } // UnitTest is a helper to force the acceptance testing harness to run in the // normal unit test suite. This should only be used for resource that don't // have any external dependencies. +// +// Test() function requirements and documentation also apply to this function. func UnitTest(t testing.T, c TestCase) { t.Helper() @@ -622,10 +758,6 @@ func UnitTest(t testing.T, c TestCase) { } func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - for _, m := range state.Modules { if len(m.Resources) > 0 { if v, ok := m.Resources[c.ResourceName]; ok { @@ -643,6 +775,9 @@ func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, // // As a user testing their provider, this lets you decompose your checks // into smaller pieces more easily. +// +// ComposeTestCheckFunc returns immediately on the first TestCheckFunc error. +// To aggregrate all errors, use ComposeAggregateTestCheckFunc instead. func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { return func(s *terraform.State) error { for i, f := range fs { @@ -677,10 +812,48 @@ func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { } } -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. +// TestCheckResourceAttrSet ensures any value exists in the state for the +// given name and key combination. The opposite of this TestCheckFunc is +// TestCheckNoResourceAttr. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// Use this as a last resort when a more specific TestCheckFunc cannot be +// implemented, such as: +// +// - TestCheckResourceAttr: Equality checking of non-TypeSet state value. +// - TestCheckResourceAttrPair: Equality checking of non-TypeSet state +// value, based on another state value. +// - TestCheckTypeSet*: Equality checking of TypeSet state values. +// - TestMatchResourceAttr: Regular expression checking of non-TypeSet +// state value. +// - TestMatchTypeSet*: Regular expression checking on TypeSet state values. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... 
} +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect underlying +// values of a list or map attribute: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value +// +// While it is possible to check nested attributes under list and map +// attributes using the special key syntax, checking a list, map, or set +// attribute directly is not supported. Use TestCheckResourceAttr with +// the special .# or .% key syntax for those situations instead. func TestCheckResourceAttrSet(name, key string) TestCheckFunc { return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -707,15 +880,71 @@ func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCh } func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { - if val, ok := is.Attributes[key]; !ok || val == "" { - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + val, ok := is.Attributes[key] + + if ok && val != "" { + return nil } - return nil + if _, ok := is.Attributes[key+".#"]; ok { + return fmt.Errorf( + "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.", + name, + key, + key+".#", + key+".0", + ) + } + + if _, ok := is.Attributes[key+".%"]; ok { + return fmt.Errorf( + "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).", + name, + key, + key+".%", + key+".examplekey", + ) + } + + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) } -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. +// TestCheckResourceAttr ensures a specific value is stored in state for the +// given name and key combination. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. 
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// - .#: Number of elements in list or set. +// - .%: Number of elements in map. +// +// The value parameter is the stringified data to check at the given key. Use +// the following attribute type rules to set the value: +// +// - Boolean: "false" or "true". +// - Float/Integer: Stringified number, such as "1.2" or "123". +// - String: No conversion necessary. func TestCheckResourceAttr(name, key, value string) TestCheckFunc { return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -742,23 +971,40 @@ func TestCheckModuleResourceAttr(mp []string, name string, key string, value str } func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { - // Empty containers may be elided from the state. - // If the intent here is to check for an empty container, allow the key to - // also be non-existent. - emptyCheck := false - if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - emptyCheck = true - } + v, ok := is.Attributes[key] - if v, ok := is.Attributes[key]; !ok || v != value { - if emptyCheck && !ok { + if !ok { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { return nil } - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) + if _, ok := is.Attributes[key+".#"]; ok { + return fmt.Errorf( + "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.", + name, + key, + key+".#", + key+".0", + ) + } + + if _, ok := is.Attributes[key+".%"]; ok { + return fmt.Errorf( + "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).", + name, + key, + key+".%", + key+".examplekey", + ) } + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + if v != value { return fmt.Errorf( "%s: Attribute '%s' expected %#v, got %#v", name, @@ -766,11 +1012,103 @@ func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value, v) } + return nil } -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. +// CheckResourceAttrWithFunc is the callback type used to apply a custom checking logic +// when using TestCheckResourceAttrWith and a value is found for the given name and key. +// +// When this function returns an error, TestCheckResourceAttrWith will fail the check. +type CheckResourceAttrWithFunc func(value string) error + +// TestCheckResourceAttrWith ensures a value stored in state for the +// given name and key combination, is checked against a custom logic. +// State value checking is only recommended for testing Computed attributes +// and attribute defaults. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. 
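To illustrate the flatmap key syntax documented above, a sketch of count and element checks against a hypothetical resource; the resource address and attribute names are placeholders.

package example_test

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// checkContainers demonstrates element counts via ".#" (lists) and ".%"
// (maps), and element values via ".0" or ".<key>".
var checkContainers = resource.ComposeAggregateTestCheckFunc(
	resource.TestCheckResourceAttr("example_thing.test", "ports.#", "2"),
	resource.TestCheckResourceAttr("example_thing.test", "ports.0", "80"),
	resource.TestCheckResourceAttr("example_thing.test", "tags.%", "1"),
	resource.TestCheckResourceAttr("example_thing.test", "tags.Name", "example"),
)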
The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// - .#: Number of elements in list or set. +// - .%: Number of elements in map. +// +// The checkValueFunc parameter is a CheckResourceAttrWithFunc, +// and it's provided with the attribute value to apply a custom checking logic, +// if it was found in the state. The function must return an error for the +// check to fail, or `nil` to succeed. +func TestCheckResourceAttrWith(name, key string, checkValueFunc CheckResourceAttrWithFunc) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + err = testCheckResourceAttrSet(is, name, key) + if err != nil { + return err + } + + err = checkValueFunc(is.Attributes[key]) + if err != nil { + return fmt.Errorf("%s: Attribute %q value: %w", name, key, err) + } + + return nil + }) +} + +// TestCheckNoResourceAttr ensures no value exists in the state for the +// given name and key combination. The opposite of this TestCheckFunc is +// TestCheckResourceAttrSet. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect underlying +// values of a list or map attribute: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// +// While it is possible to check nested attributes under list and map +// attributes using the special key syntax, checking a list, map, or set +// attribute directly is not supported. Use TestCheckResourceAttr with +// the special .# or .% key syntax for those situations instead. func TestCheckNoResourceAttr(name, key string) TestCheckFunc { return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -797,28 +1135,76 @@ func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestChe } func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + v, ok := is.Attributes[key] + // Empty containers may sometimes be included in the state. 
// If the intent here is to check for an empty container, allow the value to // also be "0". - emptyCheck := false - if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { - emptyCheck = true - } - - val, exists := is.Attributes[key] - if emptyCheck && val == "0" { + if v == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { return nil } - if exists { + if ok { return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) } + if _, ok := is.Attributes[key+".#"]; ok { + return fmt.Errorf( + "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.", + name, + key, + key+".#", + key+".0", + ) + } + + if _, ok := is.Attributes[key+".%"]; ok { + return fmt.Errorf( + "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).", + name, + key, + key+".%", + key+".examplekey", + ) + } + return nil } -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. +// TestMatchResourceAttr ensures a value matching a regular expression is +// stored in state for the given name and key combination. State value checking +// is only recommended for testing Computed attributes and attribute defaults. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// - .#: Number of elements in list or set. +// - .%: Number of elements in map. +// +// The value parameter is a compiled regular expression. A typical pattern is +// using the regexp.MustCompile() function, which will automatically ensure the +// regular expression is supported by the Go regular expression handlers during +// compilation. func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -860,6 +1246,9 @@ func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, // TestCheckResourceAttrPtr is like TestCheckResourceAttr except the // value is a pointer so that it can be updated while the test is running. // It will only be dereferenced at the point this step is run. +// +// Refer to the TestCheckResourceAttr documentation for more information about +// setting the name, key, and value parameters. 
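The flatmap key syntax and value stringification rules documented in the comments above can be combined in a single Check. The following is a minimal sketch only: the resource address "myprovider_thing.example", the attribute names, the configuration string, and the testAccProviderFactories helper are all hypothetical, while the check functions themselves (TestCheckResourceAttrSet, TestCheckResourceAttr, TestMatchResourceAttr, TestCheckResourceAttrWith) are the ones documented in this file.

package myprovider_test

import (
	"fmt"
	"regexp"
	"strconv"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func TestAccThing_attributeChecks(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProviderFactories: testAccProviderFactories, // hypothetical helper defined elsewhere in the provider
		Steps: []resource.TestStep{
			{
				Config: testAccThingConfig, // hypothetical configuration string
				Check: resource.ComposeAggregateTestCheckFunc(
					// Computed attribute is present, regardless of its value.
					resource.TestCheckResourceAttrSet("myprovider_thing.example", "id"),
					// Booleans and numbers are compared in stringified form.
					resource.TestCheckResourceAttr("myprovider_thing.example", "enabled", "true"),
					// Lists and sets are checked via the .# element count key.
					resource.TestCheckResourceAttr("myprovider_thing.example", "names.#", "2"),
					// Maps are checked via the .% element count key or a .<key> element key.
					resource.TestCheckResourceAttr("myprovider_thing.example", "tags.%", "1"),
					// Regular expression match against the stored value.
					resource.TestMatchResourceAttr("myprovider_thing.example", "arn", regexp.MustCompile(`^arn:`)),
					// Custom checking logic against the stored value.
					resource.TestCheckResourceAttrWith("myprovider_thing.example", "port", func(value string) error {
						port, err := strconv.Atoi(value)
						if err != nil || port < 1 || port > 65535 {
							return fmt.Errorf("expected a valid TCP port, got %q", value)
						}
						return nil
					}),
				),
			},
		},
	})
}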
func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { return func(s *terraform.State) error { return TestCheckResourceAttr(name, key, *value)(s) @@ -874,8 +1263,39 @@ func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value } } -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. +// TestCheckResourceAttrPair ensures value equality in state between the first +// given name and key combination and the second name and key combination. +// State value checking is only recommended for testing Computed attributes +// and attribute defaults. +// +// For managed resources, the name parameter is combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The first and second names may use any combination of managed resources +// and/or data sources. +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// - .#: Number of elements in list or set. +// - .%: Number of elements in map. func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { isFirst, err := primaryInstanceState(s, nameFirst) @@ -1004,7 +1424,7 @@ func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { // modulePrimaryInstanceState returns the instance state for the given resource // name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { +func modulePrimaryInstanceState(ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { rs, ok := ms.Resources[name] if !ok { return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) @@ -1026,14 +1446,14 @@ func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, return nil, fmt.Errorf("No module found at: %s", mp) } - return modulePrimaryInstanceState(s, ms, name) + return modulePrimaryInstanceState(ms, name) } // primaryInstanceState returns the primary instance state for the given // resource name in the root module. 
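As a companion to the TestCheckResourceAttrPair documentation above, here is a minimal sketch of pairing a managed resource attribute with a data source attribute; the addresses and attribute names are hypothetical.

// assumes: import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
var pairChecks = resource.ComposeTestCheckFunc(
	// Same top-level attribute on both sides of the pair.
	resource.TestCheckResourceAttrPair(
		"myprovider_thing.example", "name",
		"data.myprovider_thing.example", "name",
	),
	// The flatmap keys may differ between the two sides, for example
	// comparing element counts of two differently named collections.
	resource.TestCheckResourceAttrPair(
		"myprovider_thing.example", "zone_ids.#",
		"data.myprovider_zones.available", "ids.#",
	),
)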
func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) + return modulePrimaryInstanceState(ms, name) } // indexesIntoTypeSet is a heuristic to try and identify if a flatmap style diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go index 1e39294fd..e28426fde 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go @@ -1,14 +1,21 @@ package resource import ( + "context" "errors" "fmt" - "log" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepTaint(state *terraform.State, step TestStep) error { +func testStepTaint(ctx context.Context, state *terraform.State, step TestStep) error { + if len(step.Taint) == 0 { + return nil + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestStep Taint: %v", step.Taint)) + for _, p := range step.Taint { m := state.RootModule() if m == nil { @@ -18,7 +25,7 @@ func testStepTaint(state *terraform.State, step TestStep) error { if !ok { return fmt.Errorf("resource %q not found in state", p) } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + logging.HelperResourceWarn(ctx, fmt.Sprintf("Explicitly tainting resource %q", p)) rs.Taint() } return nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 1dc73906b..f1e607f83 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -1,8 +1,8 @@ package resource import ( + "context" "fmt" - "log" "reflect" "strings" @@ -10,63 +10,77 @@ import ( tfjson "github.com/hashicorp/terraform-json" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func runPostTestDestroy(t testing.T, c TestCase, wd *plugintest.WorkingDir, factories map[string]func() (*schema.Provider, error), v5factories map[string]func() (tfprotov5.ProviderServer, error), v6factories map[string]func() (tfprotov6.ProviderServer, error), statePreDestroy *terraform.State) error { +func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, providers *providerFactories, statePreDestroy *terraform.State) error { t.Helper() - err := runProviderCommand(t, func() error { - return wd.Destroy() - }, wd, providerFactories{ - legacy: factories, - protov5: v5factories, - protov6: v6factories}) + err := runProviderCommand(ctx, t, func() error { + return wd.Destroy(ctx) + }, wd, providers) if err != nil { return err } if c.CheckDestroy != nil { + logging.HelperResourceTrace(ctx, "Using TestCase CheckDestroy") + logging.HelperResourceDebug(ctx, "Calling TestCase CheckDestroy") + if err := c.CheckDestroy(statePreDestroy); err != nil { 
return err } + + logging.HelperResourceDebug(ctx, "Called TestCase CheckDestroy") } return nil } -func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { +func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest.Helper) { t.Helper() spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true - wd := helper.RequireNewWorkingDir(t) + wd := helper.RequireNewWorkingDir(ctx, t) + + ctx = logging.TestTerraformPathContext(ctx, wd.GetHelper().TerraformExecPath()) + ctx = logging.TestWorkingDirectoryContext(ctx, wd.GetHelper().WorkingDirectory()) + + providers := &providerFactories{ + legacy: c.ProviderFactories, + protov5: c.ProtoV5ProviderFactories, + protov6: c.ProtoV6ProviderFactories, + } defer func() { var statePreDestroy *terraform.State var err error - err = runProviderCommand(t, func() error { - statePreDestroy, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + statePreDestroy, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { + logging.HelperResourceError(ctx, + "Error retrieving state, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) return } if !stateIsEmpty(statePreDestroy) { - err := runPostTestDestroy(t, c, wd, c.ProviderFactories, c.ProtoV5ProviderFactories, c.ProtoV6ProviderFactories, statePreDestroy) + err := runPostTestDestroy(ctx, t, c, wd, providers, statePreDestroy) if err != nil { + logging.HelperResourceError(ctx, + "Error running post-test destroy, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) t.Fatalf("Error running post-test destroy, there may be dangling resources: %s", err.Error()) } } @@ -74,95 +88,233 @@ func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { wd.Close() }() - providerCfg, err := testProviderConfig(c) - if err != nil { - t.Fatal(err) - } + if c.hasProviders(ctx) { + err := wd.SetConfig(ctx, c.providerConfig(ctx)) - err = wd.SetConfig(providerCfg) - if err != nil { - t.Fatalf("Error setting test config: %s", err) - } - err = runProviderCommand(t, func() error { - return wd.Init() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - t.Fatalf("Error running init: %s", err.Error()) - return + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error setting provider configuration: %s", err) + } + + err = runProviderCommand(ctx, t, func() error { + return wd.Init(ctx) + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error running init: %s", err.Error()) + } } + logging.HelperResourceDebug(ctx, "Starting TestSteps") + // use this to track last step succesfully applied // acts as default for import tests var appliedCfg string - for i, step := range c.Steps { + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // 1-based indexing for humans + ctx = logging.TestStepNumberContext(ctx, stepNumber) + + logging.HelperResourceDebug(ctx, "Starting TestStep") + if 
step.PreConfig != nil { + logging.HelperResourceDebug(ctx, "Calling TestStep PreConfig") step.PreConfig() + logging.HelperResourceDebug(ctx, "Called TestStep PreConfig") } if step.SkipFunc != nil { + logging.HelperResourceDebug(ctx, "Calling TestStep SkipFunc") + skip, err := step.SkipFunc() if err != nil { - t.Fatal(err) + logging.HelperResourceError(ctx, + "Error calling TestStep SkipFunc", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error calling TestStep SkipFunc: %s", err.Error()) } + + logging.HelperResourceDebug(ctx, "Called TestStep SkipFunc") + if skip { - log.Printf("[WARN] Skipping step %d/%d", i+1, len(c.Steps)) + t.Logf("Skipping step %d/%d due to SkipFunc", stepNumber, len(c.Steps)) + logging.HelperResourceWarn(ctx, "Skipping TestStep due to SkipFunc") continue } } + if step.Config != "" && !step.Destroy && len(step.Taint) > 0 { + var state *terraform.State + + err := runProviderCommand(ctx, t, func() error { + var err error + + state, err = getState(ctx, t, wd) + + if err != nil { + return err + } + + return nil + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error reading prior state before tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error reading prior state before tainting resources: %s", stepNumber, len(c.Steps), err) + } + + err = testStepTaint(ctx, state, step) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error tainting resources: %s", stepNumber, len(c.Steps), err) + } + } + + if step.hasProviders(ctx) { + providers = &providerFactories{ + legacy: sdkProviderFactories(c.ProviderFactories).merge(step.ProviderFactories), + protov5: protov5ProviderFactories(c.ProtoV5ProviderFactories).merge(step.ProtoV5ProviderFactories), + protov6: protov6ProviderFactories(c.ProtoV6ProviderFactories).merge(step.ProtoV6ProviderFactories), + } + + providerCfg := step.providerConfig(ctx) + + err := wd.SetConfig(ctx, providerCfg) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error setting test provider configuration: %s", stepNumber, len(c.Steps), err) + } + + err = runProviderCommand( + ctx, + t, + func() error { + return wd.Init(ctx) + }, + wd, + providers, + ) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d running init: %s", stepNumber, len(c.Steps), err.Error()) + return + } + } + if step.ImportState { - err := testStepNewImportState(t, c, helper, wd, step, appliedCfg) + logging.HelperResourceTrace(ctx, "TestStep is ImportState mode") + + err := testStepNewImportState(ctx, t, helper, wd, step, appliedCfg, providers) if step.ExpectError != nil { + logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") if err == nil { - t.Fatalf("Step %d/%d error running import: expected an error but got none", i+1, len(c.Steps)) + logging.HelperResourceError(ctx, + "Error running import: expected an error but got none", + ) + t.Fatalf("Step %d/%d error running import: expected an error but got none", stepNumber, len(c.Steps)) } if !step.ExpectError.MatchString(err.Error()) { - t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), 
step.ExpectError.String(), err) + logging.HelperResourceError(ctx, + fmt.Sprintf("Error running import: expected an error with pattern (%s)", step.ExpectError.String()), + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err) } } else { if err != nil && c.ErrorCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck") err = c.ErrorCheck(err) + logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck") } if err != nil { - t.Fatalf("Step %d/%d error running import: %s", i+1, len(c.Steps), err) + logging.HelperResourceError(ctx, + "Error running import", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running import: %s", stepNumber, len(c.Steps), err) } } + + logging.HelperResourceDebug(ctx, "Finished TestStep") + continue } if step.Config != "" { - err := testStepNewConfig(t, c, wd, step) + logging.HelperResourceTrace(ctx, "TestStep is Config mode") + + err := testStepNewConfig(ctx, t, c, wd, step, providers) if step.ExpectError != nil { + logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") + if err == nil { - t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps)) + logging.HelperResourceError(ctx, + "Expected an error but got none", + ) + t.Fatalf("Step %d/%d, expected an error but got none", stepNumber, len(c.Steps)) } if !step.ExpectError.MatchString(err.Error()) { - t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) + logging.HelperResourceError(ctx, + fmt.Sprintf("Expected an error with pattern (%s)", step.ExpectError.String()), + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", stepNumber, len(c.Steps), err) } } else { if err != nil && c.ErrorCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck") + err = c.ErrorCheck(err) + + logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck") } if err != nil { - t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err) + logging.HelperResourceError(ctx, + "Unexpected error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error: %s", stepNumber, len(c.Steps), err) } } + appliedCfg = step.Config + + logging.HelperResourceDebug(ctx, "Finished TestStep") + continue } - t.Fatal("Unsupported test mode") + t.Fatalf("Step %d/%d, unsupported test mode", stepNumber, len(c.Steps)) } } -func getState(t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { +func getState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { t.Helper() - jsonState, err := wd.State() + jsonState, err := wd.State(ctx) if err != nil { return nil, err } @@ -188,7 +340,7 @@ func planIsEmpty(plan *tfjson.Plan) bool { return true } -func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState) error { +func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -202,36 +354,29 @@ func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step Test // Temporarily set the config to a minimal provider config for the refresh // test. After the refresh we can reset it. 
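The ExpectError handling shown earlier in this hunk matches the step's regular expression against the error text and fails the step either when no error occurs or when the pattern does not match. A minimal sketch of a step exercising that path follows; the configuration and error pattern are hypothetical.

package myprovider_test

import (
	"regexp"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func TestAccThing_invalidConfig(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProviderFactories: testAccProviderFactories, // hypothetical helper
		Steps: []resource.TestStep{
			{
				// Hypothetical configuration expected to fail during plan or apply.
				Config: `resource "myprovider_thing" "example" { size = -1 }`,
				// The test harness requires an error matching this pattern.
				ExpectError: regexp.MustCompile(`size must be a positive number`),
			},
		},
	})
}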
- cfg, err := testProviderConfig(c) - if err != nil { - return err - } - err = wd.SetConfig(cfg) + err := wd.SetConfig(ctx, c.providerConfig(ctx)) if err != nil { t.Fatalf("Error setting import test config: %s", err) } defer func() { - err = wd.SetConfig(step.Config) + err = wd.SetConfig(ctx, step.Config) if err != nil { t.Fatalf("Error resetting test config: %s", err) } }() // Refresh! - err = runProviderCommand(t, func() error { - err = wd.Refresh() + err = runProviderCommand(ctx, t, func() error { + err = wd.Refresh(ctx) if err != nil { t.Fatalf("Error running terraform refresh: %s", err) } - state, err = getState(t, wd) + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return err } @@ -246,6 +391,11 @@ func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step Test } actual := actualR.Primary.Attributes expected := r.Primary.Attributes + + if len(c.IDRefreshIgnore) > 0 { + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestCase IDRefreshIgnore: %v", c.IDRefreshIgnore)) + } + // Remove fields we're ignoring for _, v := range c.IDRefreshIgnore { for k := range actual { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index 9900cc396..09d18c364 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -1,56 +1,31 @@ package resource import ( + "context" "errors" "fmt" tfjson "github.com/hashicorp/terraform-json" testing "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep) error { +func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { t.Helper() - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - - if !step.Destroy { - var state *terraform.State - var err error - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) - if err != nil { - return err - } - return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - return err - } - if err := testStepTaint(state, step); err != nil { - return fmt.Errorf("Error when tainting resources: %s", err) - } - } - - err := wd.SetConfig(step.Config) + err := wd.SetConfig(ctx, step.Config) if err != nil { return fmt.Errorf("Error setting config: %w", err) } // require a refresh before applying // failing to do this will result in data sources not being updated - err = runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply refresh: %w", err) } 
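The plan/apply/plan sequence in testStepNewConfig, including the perpetual-difference check in the following hunks, is what the TestStep PlanOnly and ExpectNonEmptyPlan fields interact with. A minimal sketch of steps exercising that behaviour, assuming hypothetical configuration strings:

// assumes: import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
var permadiffSteps = []resource.TestStep{
	{
		Config: testAccThingConfig, // hypothetical configuration; creates the resource
	},
	{
		// Re-plan the same configuration without applying; the step fails if
		// the plan is not empty, surfacing perpetual differences.
		Config:   testAccThingConfig,
		PlanOnly: true,
	},
	{
		// A step that intentionally leaves changes pending must opt in.
		Config:             testAccThingConfigDrifted, // hypothetical drifting configuration
		ExpectNonEmptyPlan: true,
	},
}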
@@ -59,16 +34,15 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // subsequent Apply, and use the follow-up Plan that checks for // permadiffs if !step.PlanOnly { + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan and apply") + // Plan! - err := runProviderCommand(t, func() error { + err := runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply plan: %w", err) } @@ -77,27 +51,21 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // that the destroy steps can verify their behavior in the // check function var stateBeforeApplication *terraform.State - err = runProviderCommand(t, func() error { - stateBeforeApplication, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + stateBeforeApplication, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving pre-apply state: %w", err) } // Apply the diff, creating real resources - err = runProviderCommand(t, func() error { - return wd.Apply() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return wd.Apply(ctx) + }, wd, providers) if err != nil { if step.Destroy { return fmt.Errorf("Error running destroy: %w", err) @@ -107,22 +75,21 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // Get the new state var state *terraform.State - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving state after apply: %w", err) } // Run any configured checks if step.Check != nil { + logging.HelperResourceTrace(ctx, "Using TestStep Check") + state.IsBinaryDrivenTest = true if step.Destroy { if err := step.Check(stateBeforeApplication); err != nil { @@ -137,44 +104,36 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step } // Test for perpetual diffs by performing a plan, a refresh, and another plan + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan to check for perpetual differences") // do a plan - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply plan: %w", err) } var plan *tfjson.Plan - err = runProviderCommand(t, func() error { + err = 
runProviderCommand(ctx, t, func() error { var err error - plan, err = wd.SavedPlan() + plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving post-apply plan: %w", err) } if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - stdout, err = wd.SavedPlanRawStdout() + stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted plan output: %w", err) } @@ -183,39 +142,30 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // do a refresh if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - err := runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err := runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply refresh: %w", err) } } // do another plan - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running second post-apply plan: %w", err) } - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - plan, err = wd.SavedPlan() + plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving second post-apply plan: %w", err) } @@ -223,14 +173,11 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // check if plan is empty if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - stdout, err = wd.SavedPlanRawStdout() + stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted second plan output: %w", err) } @@ -242,21 +189,29 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // ID-ONLY REFRESH // If we've never checked an id-only refresh and our state isn't // empty, find the first resource and test it. 
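The ID-only refresh path described in the comment above is driven by the TestCase IDRefreshName and IDRefreshIgnore fields. A minimal sketch, with hypothetical resource address, attribute name, and helpers:

// assumes: import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
var idRefreshCase = resource.TestCase{
	// After a successful apply, the named resource is refreshed from its ID
	// alone and the resulting attributes are compared to the prior state.
	IDRefreshName: "myprovider_thing.example",
	// Attributes expected to differ between a full read and an ID-only
	// refresh can be excluded from the comparison.
	IDRefreshIgnore:   []string{"last_updated"},
	ProviderFactories: testAccProviderFactories, // hypothetical helper
	Steps: []resource.TestStep{
		{Config: testAccThingConfig}, // hypothetical configuration string
	},
}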
- var state *terraform.State - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + if c.IDRefreshName != "" { + logging.HelperResourceTrace(ctx, "Using TestCase IDRefreshName") + + var state *terraform.State + + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { return err } - return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - return err - } - if idRefresh && idRefreshCheck == nil && !state.Empty() { + + if state.Empty() { + return nil + } + + var idRefreshCheck *terraform.ResourceState + // Find the first non-nil resource in the state for _, m := range state.Modules { if len(m.Resources) > 0 { @@ -275,7 +230,7 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // this fails. If refresh isn't read-only, then this will have // caught a different bug. if idRefreshCheck != nil { - if err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil { + if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers); err != nil { return fmt.Errorf( "[ERROR] Test: ID-only test failed: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go index a38c3c7dc..ec61b055f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -1,17 +1,20 @@ package resource import ( + "context" + "fmt" "reflect" "strings" "github.com/davecgh/go-spew/spew" testing "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string) error { +func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -24,16 +27,13 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, // get state from check sequence var state *terraform.State var err error - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } @@ -42,73 +42,89 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, var importId string switch { case step.ImportStateIdFunc != nil: + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateIdFunc for import identifier") + var err error + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateIdFunc") + importId, err = step.ImportStateIdFunc(state) + if err != nil { t.Fatal(err) } + + logging.HelperResourceDebug(ctx, "Called 
TestStep ImportStateIdFunc") case step.ImportStateId != "": + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateId for import identifier") + importId = step.ImportStateId default: + logging.HelperResourceTrace(ctx, "Using resource identifier for import identifier") + resource, err := testResource(step, state) if err != nil { t.Fatal(err) } importId = resource.Primary.ID } - importId = step.ImportStateIdPrefix + importId + + if step.ImportStateIdPrefix != "" { + logging.HelperResourceTrace(ctx, "Prepending TestStep ImportStateIdPrefix for import identifier") + + importId = step.ImportStateIdPrefix + importId + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using import identifier: %s", importId)) // Create working directory for import tests if step.Config == "" { + logging.HelperResourceTrace(ctx, "Using prior TestStep Config for import") + step.Config = cfg if step.Config == "" { t.Fatal("Cannot import state with no specified config") } } - importWd := helper.RequireNewWorkingDir(t) + importWd := helper.RequireNewWorkingDir(ctx, t) defer importWd.Close() - err = importWd.SetConfig(step.Config) + err = importWd.SetConfig(ctx, step.Config) if err != nil { t.Fatalf("Error setting test config: %s", err) } - err = runProviderCommand(t, func() error { - return importWd.Init() - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + logging.HelperResourceDebug(ctx, "Running Terraform CLI init and import") + + err = runProviderCommand(ctx, t, func() error { + return importWd.Init(ctx) + }, importWd, providers) if err != nil { t.Fatalf("Error running init: %s", err) } - err = runProviderCommand(t, func() error { - return importWd.Import(step.ResourceName, importId) - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return importWd.Import(ctx, step.ResourceName, importId) + }, importWd, providers) if err != nil { return err } var importState *terraform.State - err = runProviderCommand(t, func() error { - importState, err = getState(t, importWd) + err = runProviderCommand(ctx, t, func() error { + importState, err = getState(ctx, t, importWd) if err != nil { return err } return nil - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, importWd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } // Go through the imported state and verify if step.ImportStateCheck != nil { + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateCheck") + var states []*terraform.InstanceState for _, r := range importState.RootModule().Resources { if r.Primary != nil { @@ -117,20 +133,27 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, states = append(states, is) } } + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateCheck") + if err := step.ImportStateCheck(states); err != nil { t.Fatal(err) } + + logging.HelperResourceDebug(ctx, "Called TestStep ImportStateCheck") } // Verify that all the states match if step.ImportStateVerify { - new := importState.RootModule().Resources - old := state.RootModule().Resources + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateVerify") + + newResources := importState.RootModule().Resources + oldResources := state.RootModule().Resources - for _, r := range new { + for 
_, r := range newResources { // Find the existing resource var oldR *terraform.ResourceState - for r2Key, r2 := range old { + for r2Key, r2 := range oldResources { // Ensure that we do not match against data sources as they // cannot be imported and are not what we want to verify. // Mode is not present in ResourceState so we use the @@ -144,7 +167,7 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, break } } - if oldR == nil { + if oldR == nil || oldR.Primary == nil { t.Fatalf( "Failed state verification, resource with ID %s not found", r.Primary.ID) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go index ada1d6e7e..9b1d57c66 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go @@ -14,25 +14,46 @@ const ( sentinelIndex = "*" ) -// TestCheckTypeSetElemNestedAttrs is a TestCheckFunc that accepts a resource -// name, an attribute path, which should use the sentinel value '*' for indexing -// into a TypeSet. The function verifies that an element matches the whole value -// map. +// TestCheckTypeSetElemNestedAttrs ensures a subset map of values is stored in +// state for the given name and key combination of attributes nested under a +// list or set block. Use this TestCheckFunc in preference over non-set +// variants to simplify testing code and ensure compatibility with indicies, +// which can easily change with schema changes. State value checking is only +// recommended for testing Computed attributes and attribute defaults. // -// You may check for unset keys, however this will also match keys set to empty -// string. Please provide a map with at least 1 non-empty value. +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". // -// map[string]string{ -// "key1": "value", -// "key2": "", -// } +// resource "myprovider_thing" "example" { ... } // -// Use this function over SDK provided TestCheckFunctions when validating a -// TypeSet where its elements are a nested object with their own attrs/values. +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. +// +// The values parameter is the map of attribute names to attribute values +// expected to be nested under the list or set. +// +// You may check for unset nested attributes, however this will also match keys +// set to an empty string. Use a map with at least 1 non-empty value. // -// Please note, if the provided value map is not granular enough, there exists -// the possibility you match an element you were not intending to, in the TypeSet. 
-// Provide a full mapping of attributes to be sure the unique element exists. +// map[string]string{ +// "key1": "value", +// "key2": "", +// } +// +// If the values map is not granular enough, it is possible to match an element +// you were not intending to in the set. Provide the most complete mapping of +// attributes possible to be sure the unique element exists. func TestCheckTypeSetElemNestedAttrs(name, attr string, values map[string]string) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -64,17 +85,47 @@ func TestCheckTypeSetElemNestedAttrs(name, attr string, values map[string]string } } -// TestMatchTypeSetElemNestedAttrs is a TestCheckFunc similar to TestCheckTypeSetElemNestedAttrs -// with the exception that it verifies that an element matches a *regexp.Regexp. +// TestMatchTypeSetElemNestedAttrs ensures a subset map of values, compared by +// regular expressions, is stored in state for the given name and key +// combination of attributes nested under a list or set block. Use this +// TestCheckFunc in preference over non-set variants to simplify testing code +// and ensure compatibility with indicies, which can easily change with schema +// changes. State value checking is only recommended for testing Computed +// attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. // -// You may check for unset keys, however this will also match keys set to empty -// string. Please provide a map with at least 1 non-empty value e.g. +// The values parameter is the map of attribute names to regular expressions +// for matching attribute values expected to be nested under the list or set. // -// map[string]*regexp.Regexp{ -// "key1": regexp.MustCompile("value"), -// "key2": regexp.MustCompile(""), -// } +// You may check for unset nested attributes, however this will also match keys +// set to an empty string. Use a map with at least 1 non-empty value. // +// map[string]*regexp.Regexp{ +// "key1": regexp.MustCompile(`^value`), +// "key2": regexp.MustCompile(`^$`), +// } +// +// If the values map is not granular enough, it is possible to match an element +// you were not intending to in the set. Provide the most complete mapping of +// attributes possible to be sure the unique element exists. 
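The '*' sentinel syntax documented above avoids hard-coding set element indexes. A minimal sketch combining the nested-attribute and single-value set checks from this file; the resource address, block name, and values are hypothetical.

// assumes: import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
var setChecks = resource.ComposeTestCheckFunc(
	// Matches any element of the "rule" set whose nested attributes contain
	// this subset of values.
	resource.TestCheckTypeSetElemNestedAttrs("myprovider_thing.example", "rule.*", map[string]string{
		"protocol": "tcp",
		"port":     "443",
	}),
	// Matches any element of the "names" set equal to the given value.
	resource.TestCheckTypeSetElemAttr("myprovider_thing.example", "names.*", "example"),
)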
func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regexp.Regexp) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -113,6 +164,39 @@ func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regex // // Use this function over SDK provided TestCheckFunctions when validating a // TypeSet where its elements are a simple value + +// TestCheckTypeSetElemAttr ensures a specific value is stored in state for the +// given name and key combination under a list or set. Use this TestCheckFunc +// in preference over non-set variants to simplify testing code and ensure +// compatibility with indicies, which can easily change with schema changes. +// State value checking is only recommended for testing Computed attributes and +// attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. +// +// The value parameter is the stringified data to check at the given key. Use +// the following attribute type rules to set the value: +// +// - Boolean: "false" or "true". +// - Float/Integer: Stringified number, such as "1.2" or "123". +// - String: No conversion necessary. func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -129,12 +213,32 @@ func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { } } -// TestCheckTypeSetElemAttrPair is a TestCheckFunc that verifies a pair of name/key -// combinations are equal where the first uses the sentinel value to index into a -// TypeSet. +// TestCheckTypeSetElemAttrPair ensures value equality in state between the +// first given name and key combination and the second name and key +// combination. State value checking is only recommended for testing Computed +// attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The first and second names may use any combination of managed resources +// and/or data sources. 
// -// E.g., TestCheckTypeSetElemAttrPair("aws_autoscaling_group.bar", "availability_zones.*", "data.aws_availability_zones.available", "names.0") -// E.g., TestCheckTypeSetElemAttrPair("aws_spot_fleet_request.bar", "launch_specification.*.instance_type", "data.data.aws_ec2_instance_type_offering.available", "instance_type") +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. func TestCheckTypeSetElemAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { return func(s *terraform.State) error { isFirst, err := primaryInstanceState(s, nameFirst) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go new file mode 100644 index 000000000..35d4a9bf5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go @@ -0,0 +1,48 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestStep and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (s TestStep) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + for name, externalProvider := range s.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go new file mode 100644 index 000000000..e9239328c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go @@ -0,0 +1,99 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// testStepValidateRequest contains data for the (TestStep).validate() method. +type testStepValidateRequest struct { + // StepNumber is the index of the TestStep in the TestCase.Steps. + StepNumber int + + // TestCaseHasProviders is enabled if the TestCase has set any of + // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, + // or ProviderFactories. 
+ TestCaseHasProviders bool +} + +// hasProviders returns true if the TestStep has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or +// ProviderFactories fields. +func (s TestStep) hasProviders(_ context.Context) bool { + if len(s.ExternalProviders) > 0 { + return true + } + + if len(s.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(s.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(s.ProviderFactories) > 0 { + return true + } + + return false +} + +// validate ensures the TestStep is valid based on the following criteria: +// +// - Config or ImportState is set. +// - Providers are not specified (ExternalProviders, +// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) +// if specified at the TestCase level. +// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, +// ProtoV6ProviderFactories, ProviderFactories) if not specified at the +// TestCase level. +// - No overlapping ExternalProviders and ProviderFactories entries +// - ResourceName is not empty when ImportState is true, ImportStateIdFunc +// is not set, and ImportStateId is not set. +// +func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { + ctx = logging.TestStepNumberContext(ctx, req.StepNumber) + + logging.HelperResourceTrace(ctx, "Validating TestStep") + + if s.Config == "" && !s.ImportState { + err := fmt.Errorf("TestStep missing Config or ImportState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range s.ExternalProviders { + if _, ok := s.ProviderFactories[name]; ok { + err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + hasProviders := s.hasProviders(ctx) + + if req.TestCaseHasProviders && hasProviders { + err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if !req.TestCaseHasProviders && !hasProviders { + err := fmt.Errorf("Providers must be specified at the TestCase level or in all TestStep") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState { + if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { + err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go index c7721bcd3..855bc4028 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -171,7 +171,7 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { // "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. // after that point. 
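The providerConfig helper added in teststep_providers.go above renders ExternalProviders entries into empty provider blocks plus a required_providers block, and the validate rules in teststep_validate.go require providers to be declared at either the TestCase or TestStep level. A minimal sketch of the field that feeds those code paths; the provider name, source address, version constraint, and configuration are hypothetical.

// assumes: import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
var externalProviderCase = resource.TestCase{
	ExternalProviders: map[string]resource.ExternalProvider{
		"random": {
			Source:            "registry.terraform.io/hashicorp/random",
			VersionConstraint: "~> 3.0",
		},
	},
	Steps: []resource.TestStep{
		{Config: testAccConfigUsingRandom}, // hypothetical configuration string
	},
}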
func readListField( - r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + r FieldReader, addr []string) (FieldReadResult, error) { addrPadded := make([]string, len(addr)+1) copy(addrPadded, addr) addrPadded[len(addrPadded)-1] = "#" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go index 3f1f5e8ab..cd4a993a3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -100,7 +100,7 @@ func (r *ConfigFieldReader) readField( case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) case TypeList: - return readListField(&nestedConfigFieldReader{r}, address, schema) + return readListField(&nestedConfigFieldReader{r}, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -203,7 +203,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err := mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var value interface{} @@ -258,7 +258,7 @@ func (r *ConfigFieldReader) readSet( // Create the set that will be our result set := schema.ZeroValue().(*Set) - raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + raw, err := readListField(&nestedConfigFieldReader{r}, address) if err != nil { return FieldReadResult{}, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go index 642e7f32e..ca3b714b1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -67,7 +67,7 @@ func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: res, err = r.readPrimitive(address, schema) case TypeList: - res, err = readListField(r, address, schema) + res, err = readListField(r, address) case TypeMap: res, err = r.readMap(address, schema) case TypeSet: @@ -128,7 +128,7 @@ func (r *DiffFieldReader) readMap( key := address[len(address)-1] err = mapValuesToPrimitive(key, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go index 092dd7f68..6fa191522 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -24,7 +24,7 @@ func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: return r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + return readListField(r, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -63,7 +63,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err err := 
mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} @@ -159,11 +159,8 @@ func (r *MapFieldReader) readSet( // "ports.1", but the "state" map might have those plus "ports.2". // We don't want "ports.2" countActual[idx] = struct{}{} - if len(countActual) >= countExpected { - return false - } - return true + return len(countActual) < countExpected }) if !completed && err != nil { return FieldReadResult{}, err diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go index 85d05be4c..39c708b27 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go @@ -100,13 +100,13 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { case TypeBool, TypeInt, TypeFloat, TypeString: return w.setPrimitive(addr, value, schema) case TypeList: - return w.setList(addr, value, schema) + return w.setList(addr, value) case TypeMap: - return w.setMap(addr, value, schema) + return w.setMap(addr, value) case TypeSet: return w.setSet(addr, value, schema) case typeObject: - return w.setObject(addr, value, schema) + return w.setObject(addr, value) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } @@ -114,8 +114,7 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { func (w *MapFieldWriter) setList( addr []string, - v interface{}, - schema *Schema) error { + v interface{}) error { k := strings.Join(addr, ".") setElement := func(idx string, value interface{}) error { addrCopy := make([]string, len(addr), len(addr)+1) @@ -148,7 +147,7 @@ func (w *MapFieldWriter) setList( if err != nil { for i := range vs { is := strconv.FormatInt(int64(i), 10) - setElement(is, nil) + _ = setElement(is, nil) // best effort; error returned below } return err @@ -160,8 +159,7 @@ func (w *MapFieldWriter) setList( func (w *MapFieldWriter) setMap( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { k := strings.Join(addr, ".") v := reflect.ValueOf(value) vs := make(map[string]interface{}) @@ -176,7 +174,7 @@ func (w *MapFieldWriter) setMap( return fmt.Errorf("%s: must be a map", k) } if v.Type().Key().Kind() != reflect.String { - return fmt.Errorf("%s: keys must strings", k) + return fmt.Errorf("%s: keys must be strings", k) } for _, mk := range v.MapKeys() { mv := v.MapIndex(mk) @@ -207,8 +205,7 @@ func (w *MapFieldWriter) setMap( func (w *MapFieldWriter) setObject( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { // Set the entire object. 
First decode into a proper structure var v map[string]interface{} if err := mapstructure.Decode(value, &v); err != nil { @@ -228,11 +225,13 @@ func (w *MapFieldWriter) setObject( } if err != nil { for k1 := range v { - w.set(append(addrCopy, k1), nil) + _ = w.set(append(addrCopy, k1), nil) // best effort; error returned below } + + return err } - return err + return nil } func (w *MapFieldWriter) setPrimitive( @@ -271,7 +270,7 @@ func (w *MapFieldWriter) setPrimitive( if err := mapstructure.Decode(v, &n); err != nil { return fmt.Errorf("%s: %s", k, err) } - set = strconv.FormatFloat(float64(n), 'G', -1, 64) + set = strconv.FormatFloat(n, 'G', -1, 64) default: return fmt.Errorf("Unknown type: %#v", schema.Type) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go index 4ed1253de..ec3ed07c1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "log" "strconv" "sync" @@ -16,12 +15,15 @@ import ( "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -const newExtraKey = "_new_extra_shim" +const ( + newExtraKey = "_new_extra_shim" +) func NewGRPCProviderServer(p *Provider) *GRPCProviderServer { return &GRPCProviderServer{ @@ -53,6 +55,7 @@ func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struc // It creates a goroutine to wait for the server stop and propagates // cancellation to the derived grpc context. 
func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + ctx = logging.InitContext(ctx) s.stopMu.Lock() defer s.stopMu.Unlock() @@ -61,7 +64,10 @@ func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { return stoppable } -func (s *GRPCProviderServer) GetProviderSchema(_ context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { +func (s *GRPCProviderServer) GetProviderSchema(ctx context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider schema") resp := &tfprotov5.GetProviderSchemaResponse{ ResourceSchemas: make(map[string]*tfprotov5.Schema), @@ -69,24 +75,28 @@ func (s *GRPCProviderServer) GetProviderSchema(_ context.Context, req *tfprotov5 } resp.Provider = &tfprotov5.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + Block: convert.ConfigSchemaToProto(ctx, s.getProviderSchemaBlock()), } resp.ProviderMeta = &tfprotov5.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), + Block: convert.ConfigSchemaToProto(ctx, s.getProviderMetaSchemaBlock()), } for typ, res := range s.provider.ResourcesMap { + logging.HelperSchemaTrace(ctx, "Found resource type", map[string]interface{}{logging.KeyResourceType: typ}) + resp.ResourceSchemas[typ] = &tfprotov5.Schema{ Version: int64(res.SchemaVersion), - Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + Block: convert.ConfigSchemaToProto(ctx, res.CoreConfigSchema()), } } for typ, dat := range s.provider.DataSourcesMap { + logging.HelperSchemaTrace(ctx, "Found data source type", map[string]interface{}{logging.KeyDataSourceType: typ}) + resp.DataSourceSchemas[typ] = &tfprotov5.Schema{ Version: int64(dat.SchemaVersion), - Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + Block: convert.ConfigSchemaToProto(ctx, dat.CoreConfigSchema()), } } @@ -111,14 +121,17 @@ func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema return dat.CoreConfigSchema() } -func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { +func (s *GRPCProviderServer) PrepareProviderConfig(ctx context.Context, req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.PrepareProviderConfigResponse{} + logging.HelperSchemaTrace(ctx, "Preparing provider configuration") + schemaBlock := s.getProviderSchemaBlock() configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -156,8 +169,7 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro // find a default value if it exists def, err := attrSchema.DefaultValue() if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err + return val, fmt.Errorf("error getting default for %q: %w", getAttr.Name, err) } // no default @@ -171,41 +183,43 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro // helper/schema used to allow setting "" to a bool if val.Type() == cty.Bool && 
tmpVal.RawEquals(cty.StringVal("")) { // return a warning about the conversion - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) tmpVal = cty.False } val, err = ctyconvert.Convert(tmpVal, val.Type()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + return val, fmt.Errorf("error setting default for %q: %w", getAttr.Name, err) } - return val, err + return val, nil }) if err != nil { - // any error here was already added to the diagnostics + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } configVal, err = schemaBlock.CoerceValue(configVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.Validate(config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.Validate(config)) + logging.HelperSchemaTrace(ctx, "Called downstream") preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -214,54 +228,61 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro return resp, nil } -func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { +func (s *GRPCProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ValidateResourceTypeConfigResponse{} schemaBlock := s.getResourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") return resp, nil } -func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, 
error) { +func (s *GRPCProviderServer) ValidateDataSourceConfig(ctx context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ValidateDataSourceConfigResponse{} schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") return resp, nil } func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.UpgradeResourceStateResponse{} res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) @@ -275,9 +296,11 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // We first need to upgrade a flatmap state if it exists. // There should never be both a JSON and Flatmap state in the request. case len(req.RawState.Flatmap) > 0: + logging.HelperSchemaTrace(ctx, "Upgrading flatmap state") + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // if there's a JSON state, we need to decode it. 
@@ -288,29 +311,31 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr err = json.Unmarshal(req.RawState.JSON, &jsonMap) } if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } default: - log.Println("[DEBUG] no state provided to upgrade") + logging.HelperSchemaDebug(ctx, "no state provided to upgrade") return resp, nil } // complete the upgrade of the JSON states + logging.HelperSchemaTrace(ctx, "Upgrading JSON state") + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // The provider isn't required to clean out removed fields - s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) + s.removeAttributes(ctx, jsonMap, schemaBlock.ImpliedType()) // now we need to turn the state into the default json representation, so // that it can be re-decoded using the actual schema. val, err := JSONMapToStateValue(jsonMap, schemaBlock) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -319,7 +344,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // First we need to CoerceValue to ensure that all object types match. val, err = schemaBlock.CoerceValue(val) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Normalize the value and fill in any missing blocks. @@ -328,7 +353,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // encode the final state to the expected msgpack format newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -341,7 +366,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // map[string]interface{}. // upgradeFlatmapState returns the json map along with the corresponding schema // version. -func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { +func (s *GRPCProviderServer) upgradeFlatmapState(_ context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { // this will be the version we've upgraded so, defaulting to the given // version in case no migration was called. upgradedVersion := version @@ -433,7 +458,7 @@ func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, // Remove any attributes no longer present in the schema, so that the json can // be correctly decoded. 
-func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { +func (s *GRPCProviderServer) removeAttributes(ctx context.Context, v interface{}, ty cty.Type) { // we're only concerned with finding maps that corespond to object // attributes switch v := v.(type) { @@ -442,7 +467,7 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { if ty.IsListType() || ty.IsSetType() { eTy := ty.ElementType() for _, eV := range v { - s.removeAttributes(eV, eTy) + s.removeAttributes(ctx, eV, eTy) } } return @@ -451,20 +476,20 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { if ty.IsMapType() { eTy := ty.ElementType() for _, eV := range v { - s.removeAttributes(eV, eTy) + s.removeAttributes(ctx, eV, eTy) } return } if ty == cty.DynamicPseudoType { - log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) + logging.HelperSchemaDebug(ctx, "ignoring dynamic block", map[string]interface{}{"block": v}) return } if !ty.IsObjectType() { // This shouldn't happen, and will fail to decode further on, so // there's no need to handle it here. - log.Printf("[WARN] unexpected type %#v for map in json state", ty) + logging.HelperSchemaWarn(ctx, "unexpected type for map in JSON state", map[string]interface{}{"type": ty}) return } @@ -472,17 +497,21 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { for attr, attrV := range v { attrTy, ok := attrTypes[attr] if !ok { - log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + logging.HelperSchemaDebug(ctx, "attribute no longer present in schema", map[string]interface{}{"attribute": attr}) delete(v, attr) continue } - s.removeAttributes(attrV, attrTy) + s.removeAttributes(ctx, attrV, attrTy) } } } -func (s *GRPCProviderServer) StopProvider(_ context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { +func (s *GRPCProviderServer) StopProvider(ctx context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Stopping provider") + s.stopMu.Lock() defer s.stopMu.Unlock() @@ -491,25 +520,28 @@ func (s *GRPCProviderServer) StopProvider(_ context.Context, _ *tfprotov5.StopPr // reset the stop signal s.stopCh = make(chan struct{}) + logging.HelperSchemaTrace(ctx, "Stopped provider") + return &tfprotov5.StopProviderResponse{}, nil } func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ConfigureProviderResponse{} schemaBlock := s.getProviderSchemaBlock() configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } s.provider.TerraformVersion = req.TerraformVersion // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -520,13 +552,18 @@ func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfproto // function. 
Ideally a provider should migrate to the context aware API that receives // request scoped contexts, however this is a large undertaking for very large providers. ctxHack := context.WithValue(ctx, StopContextKey, s.StopContext(context.Background())) + + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := s.provider.Configure(ctxHack, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + logging.HelperSchemaTrace(ctx, "Called downstream") + + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) return resp, nil } func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ReadResourceResponse{ // helper/schema did previously handle private data during refresh, but // core is now going to expect this to be maintained in order to @@ -536,20 +573,20 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) stateVal, err := msgpack.Unmarshal(req.CurrentState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState, err := res.ShimInstanceStateFromValue(stateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState.RawState = stateVal @@ -557,7 +594,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re private := make(map[string]interface{}) if len(req.Private) > 0 { if err := json.Unmarshal(req.Private, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -567,14 +604,14 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState.ProviderMeta = providerSchemaVal } newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) if diags.HasError() { return resp, nil } @@ -585,7 +622,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re // to see a null value (in the cty sense) in that case. 
newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) } resp.NewState = &tfprotov5.DynamicValue{ MsgPack: newStateMP, @@ -598,7 +635,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -607,7 +644,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -619,6 +656,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re } func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.PlanResourceChangeResponse{} // This is a signal to Terraform Core that we're doing the best we can to @@ -627,18 +665,18 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // forward to any new SDK implementations, since setting it prevents us // from catching certain classes of provider bug that can lead to // confusing downstream errors. - resp.UnsafeToUseLegacyTypeSystem = true + resp.UnsafeToUseLegacyTypeSystem = true //nolint:staticcheck res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -646,7 +684,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -659,13 +697,13 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.RawState = priorStateVal @@ -674,7 +712,7 @@ func (s 
*GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot priorPrivate := make(map[string]interface{}) if len(req.PriorPrivate) > 0 { if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -685,15 +723,15 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.ProviderMeta = providerSchemaVal } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -702,7 +740,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -734,22 +772,27 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // now we need to apply the diff to the prior state, so get the planned state plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -777,7 +820,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.PlannedState = &tfprotov5.DynamicValue{ @@ -787,12 +830,12 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // encode any timeouts into the diff Meta t := &ResourceTimeout{} if err := t.ConfigDecode(res, cfg); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } if err := t.DiffEncode(diff); err != nil { - 
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -815,7 +858,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // the Meta field gets encoded into PlannedPrivate plannedPrivate, err := json.Marshal(privateMap) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.PlannedPrivate = plannedPrivate @@ -842,7 +885,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -855,6 +898,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot } func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ApplyResourceChangeResponse{ // Start with the existing state as a fallback NewState: req.PriorState, @@ -862,39 +906,39 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } private := make(map[string]interface{}) if len(req.PlannedPrivate) > 0 { if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -916,7 +960,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro } else { diff, err = DiffFromValues(ctx, priorStateVal, plannedStateVal, configVal, stripResourceModifiers(res)) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return 
resp, nil } } @@ -968,14 +1012,14 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.ProviderMeta = providerSchemaVal } newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) newStateVal := cty.NullVal(schemaBlock.ImpliedType()) @@ -985,7 +1029,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.NewState = &tfprotov5.DynamicValue{ @@ -998,7 +1042,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro // entire object, even if the new state was nil. newStateVal, err = StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1008,7 +1052,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.NewState = &tfprotov5.DynamicValue{ @@ -1017,7 +1061,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro meta, err := json.Marshal(newInstanceState.Meta) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.Private = meta @@ -1028,12 +1072,13 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro // forward to any new SDK implementations, since setting it prevents us // from catching certain classes of provider bug that can lead to // confusing downstream errors. 
- resp.UnsafeToUseLegacyTypeSystem = true + resp.UnsafeToUseLegacyTypeSystem = true //nolint:staticcheck return resp, nil } func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ImportResourceStateResponse{} info := &terraform.InstanceInfo{ @@ -1042,7 +1087,7 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro newInstanceStates, err := s.provider.ImportState(ctx, info, req.ID) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1058,7 +1103,7 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro schemaBlock := s.getResourceSchemaBlock(resourceType) newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1067,13 +1112,13 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } meta, err := json.Marshal(is.Meta) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1092,19 +1137,20 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro } func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ReadDataSourceResponse{} schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1114,12 +1160,12 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. // the old behavior res, ok := s.provider.DataSourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) return resp, nil } diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1131,14 +1177,14 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. 
// now we can get the new complete data source newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) if diags.HasError() { return resp, nil } newStateVal, err := StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1146,7 +1192,7 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.State = &tfprotov5.DynamicValue{ @@ -1439,7 +1485,7 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { // appears in a list-like attribute (list, set, tuple) will present a nil value // to helper/schema which can panic. Return an error to the user in this case, // indicating the attribute with the null value. -func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { +func validateConfigNulls(ctx context.Context, v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { var diags []*tfprotov5.Diagnostic if v.IsNull() || !v.IsKnown() { return diags @@ -1468,8 +1514,8 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { continue } - d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) - diags = convert.AppendProtoDiag(diags, d) + d := validateConfigNulls(ctx, ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(ctx, diags, d) } case v.Type().IsMapType() || v.Type().IsObjectType(): @@ -1483,8 +1529,8 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { case v.Type().IsObjectType(): step = cty.GetAttrStep{Name: kv.AsString()} } - d := validateConfigNulls(ev, append(path, step)) - diags = convert.AppendProtoDiag(diags, d) + d := validateConfigNulls(ctx, ev, append(path, step)) + diags = convert.AppendProtoDiag(ctx, diags, d) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go index 0d1f8e23c..91a21b389 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/meta" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -265,7 +266,7 @@ func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) d } if p.configured { - log.Printf("[WARN] Previously configured provider being re-configured. This can cause issues in concurrent testing if the configurations are not equal.") + logging.HelperSchemaWarn(ctx, "Previously configured provider being re-configured. 
This can cause issues in concurrent testing if the configurations are not equal.") } sm := schemaMap(p.Schema) @@ -378,11 +379,15 @@ func (p *Provider) ImportState( results := []*ResourceData{data} if r.Importer.State != nil || r.Importer.StateContext != nil { var err error + logging.HelperSchemaTrace(ctx, "Calling downstream") + if r.Importer.StateContext != nil { results, err = r.Importer.StateContext(ctx, data, p.meta) } else { results, err = r.Importer.State(data, p.meta) } + logging.HelperSchemaTrace(ctx, "Called downstream") + if err != nil { return nil, err } @@ -391,6 +396,21 @@ func (p *Provider) ImportState( // Convert the results to InstanceState values and return it states := make([]*terraform.InstanceState, len(results)) for i, r := range results { + if r == nil { + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources and skip returning a missing or empty ResourceData. " + + "Please report this to the provider developers.") + } + + if r.Id() == "" { + return nil, fmt.Errorf("The provider returned a resource missing an identifier during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should not call d.SetId(\"\") or create an empty ResourceData. " + + "If the resource is missing, instead return an error. " + + "Please report this to the provider developers.") + } + states[i] = r.State() } @@ -398,10 +418,10 @@ func (p *Provider) ImportState( // isn't obvious so we circumvent that with a friendlier error. for _, s := range states { if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources. " + + "Please report this to the provider developers.") } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go index b0a8631ca..5b40bec89 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" - "log" "strconv" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -31,35 +31,44 @@ var ReservedResourceFields = []string{ "provisioner", } -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). +// Resource is an abstraction for multiple Terraform concepts: // -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. 
+// - Managed Resource: An infrastructure component with a schema, lifecycle +// operations such as create, read, update, and delete +// (CRUD), and optional implementation details such as +// import support, upgrade state support, and difference +// customization. +// - Data Resource: Also known as a data source. An infrastructure component +// with a schema and only the read lifecycle operation. +// - Block: When implemented within a Schema type Elem field, a configuration +// block that contains nested schema information such as attributes +// and blocks. // -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. +// To fully implement managed resources, the Provider type ResourcesMap field +// should include a reference to an implementation of this type. To fully +// implement data resources, the Provider type DataSourcesMap field should +// include a reference to an implementation of this type. +// +// Each field further documents any constraints based on the Terraform concept +// being implemented. type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. + // Schema is the structure and type information for this component. This + // field is required for all Resource concepts. // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. + // The keys of this map are the names used in a practitioner configuration, + // such as the attribute or block name. The values describe the structure + // and type information of that attribute or block. Schema map[string]*Schema // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. + // definition. This field is only valid when the Resource is a managed + // resource. + // + // The current SchemaVersion stored in the state for each resource. + // Provider authors can increment this version number when Schema semantics + // change in an incompatible manner. If the state's SchemaVersion is less + // than the current SchemaVersion, the MigrateState and StateUpgraders + // functionality is executed to upgrade the state information. // // When unset, SchemaVersion defaults to 0, so provider authors can start // their Versioning at any integer >= 1 @@ -67,6 +76,7 @@ type Resource struct { // MigrateState is responsible for updating an InstanceState with an old // version to the format expected by the current version of the Schema. + // This field is only valid when the Resource is a managed resource. // // It is called during Refresh if the State's stored SchemaVersion is less // than the current SchemaVersion of the Resource. @@ -86,7 +96,8 @@ type Resource struct { // StateUpgraders contains the functions responsible for upgrading an // existing state with an old schema version to a newer schema. 
It is // called specifically by Terraform when the stored schema version is less - than the current SchemaVersion of the Resource. + than the current SchemaVersion of the Resource. This field is only valid + when the Resource is a managed resource. // // StateUpgraders map specific schema versions to a StateUpgrader // function. The registered versions are expected to be ordered, @@ -95,57 +106,261 @@ // MigrateState. StateUpgraders []StateUpgrader - // The functions below are the CRUD operations for this resource. + // Create is called when the provider must create a new instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. // - // Deprecated: Please use the context aware equivalents instead. Only one of - // the operations or context aware equivalent can be set, not both. + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use CreateContext or CreateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Create CreateFunc - // Deprecated: Please use the context aware equivalents instead. + + // Read is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource. Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform.
+ // + // Deprecated: Use ReadContext or ReadWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Read ReadFunc - // Deprecated: Please use the context aware equivalents instead. + + // Update is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused and update will instead destroy and recreate the infrastructure + // compontent. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use UpdateContext or UpdateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Update UpdateFunc - // Deprecated: Please use the context aware equivalents instead. + + // Delete is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use DeleteContext or DeleteWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Delete DeleteFunc // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff + // exists. This field is only valid when the Resource is a managed + // resource. + // + // If this returns false, then this will affect the diff // accordingly. If this function isn't set, it will not be called. 
You // can also signal existence in the Read method by calling d.SetId("") // if the Resource is no longer present and should be removed from state. // The *ResourceData passed to Exists should _not_ be modified. // - // Deprecated: ReadContext should be able to encapsulate the logic of Exists + // Deprecated: Remove in preference of ReadContext or ReadWithoutTimeout. Exists ExistsFunc - // The functions below are the CRUD operations for this resource. + // CreateContext is called when the provider must create a new instance of + // a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). // - // The only optional operation is Update. If Update is not - // implemented, then updates will not be supported for this resource. + // By default, CreateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement CreateWithoutTimeout + // instead of CreateContext to remove the default timeout. // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid a "inconsistent result after apply" error. // - // These functions are passed a context configured to timeout with whatever - // was set as the timeout for this operation. Useful for forwarding on to - // backend SDK's that accept context. The context will also cancel if - // Terraform sends a cancellation signal. + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. // - // These functions return diagnostics, allowing developers to build - // a list of warnings and errors to be presented to the Terraform user. - // The AttributePath of those diagnostics should be built within these - // functions, please consult go-cty documentation for building a cty.Path + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. CreateContext CreateContextFunc - ReadContext ReadContextFunc + + // ReadContext is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource. 
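As a hedged sketch of the context-aware CreateContext contract documented above, assuming the context, time, helper/schema, and diag imports; the apiClient type, CreateWidget method, and 45 minute timeout are illustrative only.

&schema.Resource{
	CreateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		client := meta.(*apiClient) // result of the Provider ConfigureFunc

		// ctx carries the configured (or default 20 minute) create timeout
		// and is cancelled when Terraform receives an interrupt.
		widget, err := client.CreateWidget(ctx, d.Get("name").(string))
		if err != nil {
			return diag.FromErr(err)
		}

		// A non-empty ID is required for the instance to be saved into state.
		d.SetId(widget.ID)
		return nil
	},
	// Raise the default create timeout, or use CreateWithoutTimeout to
	// remove the default entirely.
	Timeouts: &schema.ResourceTimeout{Create: schema.DefaultTimeout(45 * time.Minute)},
}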
Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, ReadContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement ReadWithoutTimeout + // instead of ReadContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + ReadContext ReadContextFunc + + // UpdateContext is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused and update will instead destroy and recreate the infrastructure + // compontent. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, UpdateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement UpdateWithoutTimeout + // instead of UpdateContext to remove the default timeout. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. UpdateContext UpdateContextFunc + + // DeleteContext is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. 
Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, DeleteContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement DeleteWithoutTimeout + // instead of DeleteContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. DeleteContext DeleteContextFunc - // CreateWithoutTimeout is equivalent to CreateContext with no context timeout. + // CreateWithoutTimeout is called when the provider must create a new + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. // // Most resources should prefer CreateContext with properly implemented // operation timeout values, however there are cases where operation @@ -153,9 +368,34 @@ type Resource struct { // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid a "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. CreateWithoutTimeout CreateContextFunc - // ReadWithoutTimeout is equivalent to ReadContext with no context timeout. + // ReadWithoutTimeout is called when the provider must refresh the state of + // a managed resource instance or data resource instance. This field is + // only valid when the Resource is a managed resource or data resource. + // Only one of Read, ReadContext, or ReadWithoutTimeout should be + // implemented. 
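A sketch of the Read/ReadContext/ReadWithoutTimeout contract documented above, assuming the context, errors, helper/schema, and diag imports; the apiClient type, GetWidget method, and errWidgetNotFound sentinel are hypothetical.

func resourceExampleWidgetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*apiClient)

	widget, err := client.GetWidget(ctx, d.Id())
	if err != nil {
		if errors.Is(err, errWidgetNotFound) {
			// Signal that the managed resource instance no longer exists,
			// without returning an error, so Terraform plans recreation.
			d.SetId("")
			return nil
		}
		return diag.FromErr(err)
	}

	if err := d.Set("name", widget.Name); err != nil {
		return diag.FromErr(err)
	}
	return nil
}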
// // Most resources should prefer ReadContext with properly implemented // operation timeout values, however there are cases where operation @@ -163,9 +403,37 @@ type Resource struct { // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. ReadWithoutTimeout ReadContextFunc - // UpdateWithoutTimeout is equivalent to UpdateContext with no context timeout. + // UpdateWithoutTimeout is called when the provider must update an instance + // of a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. // // Most resources should prefer UpdateContext with properly implemented // operation timeout values, however there are cases where operation @@ -173,9 +441,36 @@ type Resource struct { // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused and update will instead destroy and recreate the infrastructure + // compontent. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. 
This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. UpdateWithoutTimeout UpdateContextFunc - // DeleteWithoutTimeout is equivalent to DeleteContext with no context timeout. + // DeleteWithoutTimeout is called when the provider must destroy the + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. // // Most resources should prefer DeleteContext with properly implemented // operation timeout values, however there are cases where operation @@ -183,15 +478,37 @@ type Resource struct { // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. DeleteWithoutTimeout DeleteContextFunc - // CustomizeDiff is a custom function for working with the diff that - // Terraform has created for this resource - it can be used to customize the - // diff that has been created, diff values not controlled by configuration, - // or even veto the diff altogether and abort the plan. It is passed a - // *ResourceDiff, a structure similar to ResourceData but lacking most write - // functions like Set, while introducing new functions that work with the - // diff such as SetNew, SetNewComputed, and ForceNew. + // CustomizeDiff is called after a difference (plan) has been generated + // for the Resource and allows for customizations, such as setting values + // not controlled by configuration, conditionally triggering resource + // recreation, or implementing additional validation logic to abort a plan. + // This field is only valid when the Resource is a managed resource. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceDiff parameter is similar to ResourceData but replaces the + // Set method with other difference handling methods, such as SetNew, + // SetNewComputed, and ForceNew. In general, only Schema with Computed + // enabled can have those methods executed against them. // // The phases Terraform runs this in, and the state available via functions // like Get and GetChange, are as follows: @@ -205,41 +522,60 @@ type Resource struct { // // This function needs to be resilient to support all scenarios. // - // For the most part, only computed fields can be customized by this - // function. 
+ // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. // - // This function is only allowed on regular resources (not data sources). + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. CustomizeDiff CustomizeDiffFunc - // Importer is the ResourceImporter implementation for this resource. + // Importer is called when the provider must import an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. + // // If this is nil, then this resource does not support importing. If // this is non-nil, then it supports importing and ResourceImporter // must be validated. The validity of ResourceImporter is verified // by InternalValidate on Resource. Importer *ResourceImporter - // If non-empty, this string is emitted as a warning during Validate. + // If non-empty, this string is emitted as the details of a warning + // diagnostic during validation (validate, plan, and apply operations). + // This field is only valid when the Resource is a managed resource or + // data resource. DeprecationMessage string - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to it's size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. + // Timeouts configures the default time duration allowed before a create, + // read, update, or delete operation is considered timed out, which returns + // an error to practitioners. This field is only valid when the Resource is + // a managed resource or data resource. + // + // When implemented, practitioners can add a timeouts configuration block + // within their managed resource or data resource configuration to further + // customize the create, read, update, or delete operation timeouts. For + // example, a configuration may specify a longer create timeout for a + // database resource due to its data size. + // + // The ResourceData that is passed to create, read, update, and delete + // functionality can access the merged time duration of the Resource + // default timeouts configured in this field and the practitioner timeouts + // configuration via the Timeout method. Practitioner configuration + // always overrides any default values set here, whether shorter or longer. Timeouts *ResourceTimeout // Description is used as the description for docs, the language server and // other user facing usage. It can be plain-text or markdown depending on the - // global DescriptionKind setting. + // global DescriptionKind setting. This field is valid for any Resource. Description string // UseJSONNumber should be set when state upgraders will expect // json.Numbers instead of float64s for numbers. This is added as a // toggle for backwards compatibility for type assertions, but should // be used in all new resources to avoid bugs with sufficiently large - // user input. + // user input. This field is only valid when the Resource is a managed + // resource. 
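A sketch combining several of the Resource fields documented above, assuming the context and helper/schema imports; the "size" attribute, deprecation message, and replacement condition are hypothetical, and ImportStatePassthroughContext is shown only as a common importer choice.

&schema.Resource{
	// Schema and CRUD functions elided for brevity.
	CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
		// Force replacement when the hypothetical "size" attribute shrinks,
		// since the remote system cannot shrink it in place.
		o, n := d.GetChange("size")
		if oldSize, ok := o.(int); ok {
			if newSize, ok := n.(int); ok && newSize < oldSize {
				return d.ForceNew("size")
			}
		}
		return nil
	},
	Importer: &schema.ResourceImporter{
		StateContext: schema.ImportStatePassthroughContext,
	},
	DeprecationMessage: "use the example_gadget resource instead",
	UseJSONNumber:      true,
}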
// // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more // details. @@ -300,6 +636,7 @@ type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Di type StateMigrateFunc func( int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) +// Implementation of a single schema version state upgrade. type StateUpgrader struct { // Version is the version schema that this Upgrader will handle, converting // it to Version+1. @@ -318,7 +655,36 @@ type StateUpgrader struct { Upgrade StateUpgradeFunc } -// See StateUpgrader +// Function signature for a schema version state upgrade handler. +// +// The Context parameter stores SDK information, such as loggers. It also +// is wired to receive any cancellation from Terraform such as a system or +// practitioner sending SIGINT (Ctrl-c). +// +// The map[string]interface{} parameter contains the previous schema version +// state data for a managed resource instance. The keys are top level attribute +// or block names mapped to values that can be type asserted similar to +// fetching values using the ResourceData Get* methods: +// +// - TypeBool: bool +// - TypeFloat: float +// - TypeInt: int +// - TypeList: []interface{} +// - TypeMap: map[string]interface{} +// - TypeSet: *Set +// - TypeString: string +// +// In certain scenarios, the map may be nil, so checking for that condition +// upfront is recommended to prevent potential panics. +// +// The interface{} parameter is the result of the Provider type +// ConfigureFunc field execution. If the Provider does not define +// a ConfigureFunc, this will be nil. This parameter is conventionally +// used to store API clients and other provider instance specific data. +// +// The map[string]interface{} return parameter should contain the upgraded +// schema version state data for a managed resource instance. Values must +// align to the typing mentioned above. type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) // See Resource documentation. @@ -412,16 +778,16 @@ func (r *Resource) Apply( rt := ResourceTimeout{} if _, ok := d.Meta[TimeoutKey]; ok { if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } else if s != nil { if _, ok := s.Meta[TimeoutKey]; ok { if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + logging.HelperSchemaDebug(ctx, "No meta timeoutkey found in Apply()") } data.timeouts = &rt @@ -436,7 +802,10 @@ func (r *Resource) Apply( if d.Destroy || d.RequiresNew() { if s.ID != "" { // Destroy the resource since it is created + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.delete(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") + if diags.HasError() { return r.recordCurrentSchemaVersion(data.State()), diags } @@ -464,7 +833,9 @@ func (r *Resource) Apply( if data.Id() == "" { // We're creating, it is a new resource. data.MarkNewResource() + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.create(ctx, data, meta)...) 
+ logging.HelperSchemaTrace(ctx, "Called downstream") } else { if !r.updateFuncSet() { return s, append(diags, diag.Diagnostic{ @@ -472,7 +843,9 @@ func (r *Resource) Apply( Summary: "doesn't support update", }) } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.update(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") } return r.recordCurrentSchemaVersion(data.State()), diags @@ -499,10 +872,10 @@ func (r *Resource) Diff( if instanceDiff != nil { if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + logging.HelperSchemaError(ctx, "Error encoding timeout to instance diff", map[string]interface{}{logging.KeyError: err}) } } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") + logging.HelperSchemaDebug(ctx, "Instance Diff is nil in Diff()") } return instanceDiff, err @@ -566,7 +939,10 @@ func (r *Resource) ReadDataApply( return nil, diag.FromErr(err) } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + state := data.State() if state != nil && state.ID == "" { // Data sources can set an ID if they want, but they aren't @@ -595,7 +971,7 @@ func (r *Resource) RefreshWithoutUpgrade( rt := ResourceTimeout{} if _, ok := s.Meta[TimeoutKey]; ok { if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } @@ -612,7 +988,10 @@ func (r *Resource) RefreshWithoutUpgrade( data.providerMeta = s.ProviderMeta } + logging.HelperSchemaTrace(ctx, "Calling downstream") exists, err := r.Exists(data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + if err != nil { return s, diag.FromErr(err) } @@ -632,12 +1011,16 @@ func (r *Resource) RefreshWithoutUpgrade( data.providerMeta = s.ProviderMeta } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + state := data.State() if state != nil && state.ID == "" { state = nil } + schemaMap(r.Schema).handleDiffSuppressOnRefresh(ctx, s, state) return r.recordCurrentSchemaVersion(state), diags } @@ -737,8 +1120,8 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error } } - for k, f := range tsm { - if isReservedResourceFieldName(k, f) { + for k := range tsm { + if isReservedResourceFieldName(k) { return fmt.Errorf("%s is a reserved field name", k) } } @@ -844,13 +1227,13 @@ func validateResourceID(s *Schema) error { // ID should at least be computed. If unspecified it will be set to Computed and Optional, // but Optional is unnecessary if undesired. 
- if s.Computed != true { + if !s.Computed { return fmt.Errorf(`the "id" attribute must be marked Computed`) } return nil } -func isReservedResourceFieldName(name string, s *Schema) bool { +func isReservedResourceFieldName(name string) bool { for _, reservedName := range ReservedResourceFields { if name == reservedName { return true diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go index 46ac3f2a4..396b5bc6f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -528,7 +528,7 @@ func (d *ResourceData) getChange( func (d *ResourceData) get(addr []string, source getSource) getResult { d.once.Do(d.init) - level := "set" + var level string flags := source & ^getSourceLevelMask exact := flags&getSourceExact != 0 source = source & getSourceLevelMask diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go index 1fdc48cca..27575b297 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -270,16 +270,16 @@ func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { // diffChange helps to implement resourceDiffer and derives its change values // from ResourceDiff's own change data, in addition to existing diff, config, and state. func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { - old, new, customized := d.getChange(key) + oldValue, newValue, customized := d.getChange(key) - if !old.Exists { - old.Value = nil + if !oldValue.Exists { + oldValue.Value = nil } - if !new.Exists || d.removed(key) { - new.Value = nil + if !newValue.Exists || d.removed(key) { + newValue.Value = nil } - return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized + return oldValue.Value, newValue.Value, !reflect.DeepEqual(oldValue.Value, newValue.Value), newValue.Computed, customized } // SetNew is used to set a new diff value for the mentioned key. The value must @@ -308,12 +308,12 @@ func (d *ResourceDiff) SetNewComputed(key string) error { } // setDiff performs common diff setting behaviour. -func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { +func (d *ResourceDiff) setDiff(key string, newValue interface{}, computed bool) error { if err := d.clear(key); err != nil { return err } - if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + if err := d.newWriter.WriteField(strings.Split(key, "."), newValue, computed); err != nil { return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) } @@ -374,8 +374,8 @@ func (d *ResourceDiff) Get(key string) interface{} { // results from the exact levels for the new diff, then from state and diff as // per normal. 
func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { - old, new, _ := d.getChange(key) - return old.Value, new.Value + oldValue, newValue, _ := d.getChange(key) + return oldValue.Value, newValue.Value } // GetOk functions the same way as ResourceData.GetOk, but it also checks the @@ -422,19 +422,29 @@ func (d *ResourceDiff) NewValueKnown(key string) bool { return !r.Computed } +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceDiff) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + // HasChange checks to see if there is a change between state and the diff, or // in the overridden diff. func (d *ResourceDiff) HasChange(key string) bool { - old, new := d.GetChange(key) + oldValue, newValue := d.GetChange(key) // If the type implements the Equal interface, then call that // instead of just doing a reflect.DeepEqual. An example where this is // needed is *Set - if eq, ok := old.(Equal); ok { - return !eq.Equal(new) + if eq, ok := oldValue.(Equal); ok { + return !eq.Equal(newValue) } - return !reflect.DeepEqual(old, new) + return !reflect.DeepEqual(oldValue, newValue) } // Id returns the ID of this resource. @@ -506,16 +516,16 @@ func (d *ResourceDiff) GetRawPlan() cty.Value { // results from the exact levels for the new diff, then from state and diff as // per normal. func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { - old := d.get(strings.Split(key, "."), "state") - var new getResult + oldValue := d.get(strings.Split(key, "."), "state") + var newValue getResult for p := range d.updatedKeys { if childAddrOf(key, p) { - new = d.getExact(strings.Split(key, "."), "newDiff") - return old, new, true + newValue = d.getExact(strings.Split(key, "."), "newDiff") + return oldValue, newValue, true } } - new = d.get(strings.Split(key, "."), "newDiff") - return old, new, false + newValue = d.get(strings.Split(key, "."), "newDiff") + return oldValue, newValue, false } // removed checks to see if the key is present in the existing, pre-customized diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go index cee0b6781..2594677f5 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -137,7 +137,13 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) *timeout = rt } - return nil + + // This early return, which makes this function handle a single + // timeout configuration block, should likely not be here but the + // SDK has never raised an error for multiple blocks nor made any + // precedence decisions for them in the past. + // It is left here for compatibility reasons. 
+ return nil //nolint:staticcheck } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index 8d5c22e04..7cbd58589 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -23,17 +23,24 @@ import ( "strings" "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -// Schema is used to describe the structure of a value. +// Schema describes the structure and type information of a value, whether +// sourced from configuration, plan, or state data. Schema is used in Provider +// and Resource types (for managed resources and data resources) and is +// fundamental to the implementations of ResourceData and ResourceDiff. // -// Read the documentation of the struct elements for important details. +// The Type field must always be set. At least one of Required, Optional, +// Optional and Computed, or Computed must be enabled unless the Schema is +// directly an implementation of an Elem field of another Schema. type Schema struct { // Type is the type of the value and must be one of the ValueType values. // @@ -71,14 +78,37 @@ type Schema struct { // behavior, and SchemaConfigModeBlock is not permitted. ConfigMode SchemaConfigMode - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. + // Required indicates whether the practitioner must enter a value in the + // configuration for this attribute. Required cannot be used with Computed + // Default, DefaultFunc, DiffSuppressFunc, DiffSuppressOnRefresh, + // InputDefault, Optional, or StateFunc. At least one of Required, + // Optional, Optional and Computed, or Computed must be enabled. + Required bool + + // Optional indicates whether the practitioner can choose to not enter + // a value in the configuration for this attribute. Optional cannot be used + // with Required. // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. + // If also using Default or DefaultFunc, Computed should also be enabled, + // otherwise Terraform can output warning logs or "inconsistent result + // after apply" errors. Optional bool - Required bool + + // Computed indicates whether the provider may return its own value for + // this attribute or not. Computed cannot be used with Required. If + // Required and Optional are both false, the attribute will be considered + // "read only" for the practitioner, with only the provider able to set + // its value. + Computed bool + + // ForceNew indicates whether a change in this value requires the + // replacement (destroy and create) of the managed resource instance, + // rather than an in-place update. This field is only valid when the + // encapsulating Resource is a managed resource. + // + // If conditional replacement logic is needed, use the Resource type + // CustomizeDiff field to call the ResourceDiff type ForceNew method. 
+ ForceNew bool // If this is non-nil, the provided function will be used during diff // of this field. If this is nil, a default diff for the type of the @@ -87,31 +117,74 @@ type Schema struct { // This allows comparison based on something other than primitive, list // or map equality - for example SSH public keys may be considered // equivalent regardless of trailing whitespace. + // + // If CustomizeDiffFunc makes this field ForceNew=true, the + // following DiffSuppressFunc will come in with the value of old being + // empty, as if creating a new resource. + // + // By default, DiffSuppressFunc is considered only when deciding whether + // a configuration value is significantly different than the prior state + // value during planning. Set DiffSuppressOnRefresh to opt in to checking + // this also during the refresh step. DiffSuppressFunc SchemaDiffSuppressFunc - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. + // DiffSuppressOnRefresh enables using the DiffSuppressFunc to ignore + // normalization-classified changes returned by the resource type's + // "Read" or "ReadContext" function, in addition to the default behavior of + // doing so during planning. + // + // This is a particularly good choice for attributes which take strings + // containing "microsyntaxes" where various different values are packed + // together in some serialization where there are many ways to express the + // same information. For example, attributes which accept JSON data can + // include different whitespace characters without changing meaning, and + // case-insensitive identifiers may refer to the same object using different + // characters. // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. + // This is valid only for attributes of primitive types, because + // DiffSuppressFunc itself is only compatible with primitive types. // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. + // The key benefit of activating this flag is that the result of Read or + // ReadContext will be cleaned of normalization-only changes in the same + // way as the planning result would normaly be, which therefore prevents + // churn for downstream expressions deriving from this attribute and + // prevents incorrect "Values changed outside of Terraform" messages + // when the remote API returns values which have the same meaning as the + // prior state but in a different serialization. // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. + // This is an opt-in because it was a later addition to the DiffSuppressFunc + // functionality which would cause some significant changes in behavior + // for existing providers if activated everywhere all at once. 
+ DiffSuppressOnRefresh bool + + // Default indicates a value to set if this attribute is not set in the + // configuration. Default cannot be used with DefaultFunc or Required. + // Default is only supported if the Type is TypeBool, TypeFloat, TypeInt, + // or TypeString. Default cannot be used if the Schema is directly an + // implementation of an Elem field of another Schema, such as trying to + // set a default value for a TypeList or TypeSet. // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} + // Changing either Default can be a breaking change, especially if the + // attribute has ForceNew enabled. If a default needs to change to align + // with changing assumptions in an upstream API, then it may be necessary + // to also implement resource state upgrade functionality to change the + // state to match or update read operation logic to align with the new + // default. + Default interface{} + + // DefaultFunc can be specified to compute a dynamic default when this + // attribute is not set in the configuration. DefaultFunc cannot be used + // with Default. For legacy reasons, DefaultFunc can be used with Required + // attributes in a Provider schema, which will prompt practitioners for + // input if the result of this function is nil. + // + // The return value should be stable to avoid generating confusing + // plan differences. Changing the return value can be a breaking change, + // especially if ForceNew is enabled. If a default needs to change to align + // with changing assumptions in an upstream API, then it may be necessary + // to also implement resource state upgrade functionality to change the + // state to match or update read operation logic to align with the new + // default. DefaultFunc SchemaDefaultFunc // Description is used as the description for docs, the language server and @@ -124,85 +197,125 @@ type Schema struct { // asked for. If Input is asked, this will be the default value offered. InputDefault string - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. - // // StateFunc is a function called to change the value of this before // storing it in the state (and likewise before comparing for diffs). // The use for this is for example with large strings, you may want // to simply store the hash of it. - Computed bool - ForceNew bool StateFunc SchemaStateFunc - // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // Elem represents the element type for a TypeList, TypeSet, or TypeMap + // attribute or block. The only valid types are *Schema and *Resource. + // Only TypeList and TypeSet support *Resource. + // + // If the Elem is a *Schema, the surrounding Schema represents a single + // attribute with a single element type for underlying elements. In + // practitioner configurations, an equals sign (=) is required to set + // the value. Refer to the following documentation: + // + // https://www.terraform.io/docs/language/syntax/configuration.html + // + // The underlying *Schema is only required to implement Type. ValidateFunc + // or ValidateDiagFunc can be used to validate each element value. + // + // If the Elem is a *Resource, the surrounding Schema represents a + // configuration block. 
Blocks can contain underlying attributes or blocks. + // In practitioner configurations, an equals sign (=) cannot be used to + // set the value. Blocks are instead repeated as necessary, or require + // the use of dynamic block expressions. Refer to the following + // documentation: // - // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type that is one of the primitives: TypeString, TypeBool, - // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a - // *Resource. If it is *Schema, the element type is just a simple value. - // If it is *Resource, the element type is a complex structure, - // potentially managed via its own CRUD actions on the API. + // https://www.terraform.io/docs/language/syntax/configuration.html + // https://www.terraform.io/docs/language/expressions/dynamic-blocks.html + // + // The underlying *Resource must only implement the Schema field. Elem interface{} - // The following fields are only set for a TypeList or TypeSet. - // // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. - // + // TypeSet or TypeList. + MaxItems int + // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. + // TypeSet or TypeList. // // If the field Optional is set to true then MinItems is ignored and thus // effectively zero. - MaxItems int MinItems int - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. + // Set defines custom hash algorithm for each TypeSet element. If not + // defined, the SDK implements a default hash algorithm based on the + // underlying structure and type information of the Elem field. Set SchemaSetFunc // ComputedWhen is a set of queries on the configuration. Whenever any // of these things is changed, it will require a recompute (this requires // that Computed is set to true). // - // NOTE: This currently does not work. + // Deprecated: This functionality is not implemented and this field + // declaration should be removed. ComputedWhen []string - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - // - // ExactlyOneOf is a set of schema keys that, when set, only one of the - // keys in that list can be specified. It will error if none are - // specified as well. + // ConflictsWith is a set of attribute paths, including this attribute, + // whose configurations cannot be set simultaneously. This implements the + // validation logic declaratively within the schema and can trigger earlier + // in Terraform operations, rather than using create or update logic which + // only triggers during apply. // - // AtLeastOneOf is a set of schema keys that, when set, at least one of - // the keys in that list must be specified. - // - // RequiredWith is a set of schema keys that must be set simultaneously. + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. 
Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". ConflictsWith []string - ExactlyOneOf []string - AtLeastOneOf []string - RequiredWith []string - // When Deprecated is set, this attribute is deprecated. + // ExactlyOneOf is a set of attribute paths, including this attribute, + // where only one attribute out of all specified can be configured. It will + // return a validation error if none are specified as well. This implements + // the validation logic declaratively within the schema and can trigger + // earlier in Terraform operations, rather than using create or update + // logic which only triggers during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + ExactlyOneOf []string + + // AtLeastOneOf is a set of attribute paths, including this attribute, + // in which at least one of the attributes must be configured. This + // implements the validation logic declaratively within the schema and can + // trigger earlier in Terraform operations, rather than using create or + // update logic which only triggers during apply. // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + AtLeastOneOf []string + + // RequiredWith is a set of attribute paths, including this attribute, + // that must be set simultaneously. This implements the validation logic + // declaratively within the schema and can trigger earlier in Terraform + // operations, rather than using create or update logic which only triggers + // during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + RequiredWith []string + + // Deprecated indicates the message to include in a warning diagnostic to + // practitioners when this attribute is configured. Typically this is used + // to signal that this attribute will be removed in the future and provide + // next steps to the practitioner, such as using a different attribute, + // different resource, or if it should just be removed. 
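A sketch of a Schema map exercising the attribute behaviors documented above; every attribute name is hypothetical.

Schema: map[string]*schema.Schema{
	"name": {
		Type:     schema.TypeString,
		Required: true, // the practitioner must configure a value
		ForceNew: true, // changes destroy and recreate the instance
	},
	"tier": {
		Type:     schema.TypeString,
		Optional: true,
		Default:  "standard", // cannot be combined with Required or DefaultFunc
	},
	"ipv4_cidr": {
		Type:          schema.TypeString,
		Optional:      true,
		ConflictsWith: []string{"ipv6_cidr"}, // absolute attribute paths only
	},
	"ipv6_cidr": {
		Type:          schema.TypeString,
		Optional:      true,
		ConflictsWith: []string{"ipv4_cidr"},
	},
	"endpoint": {
		Type:     schema.TypeString,
		Computed: true, // provider-set; read only for practitioners
	},
	"tags": {
		Type:     schema.TypeMap,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString}, // element type for map values
	},
},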
Deprecated string // ValidateFunc allows individual fields to define arbitrary validation @@ -238,9 +351,28 @@ type Schema struct { ValidateDiagFunc SchemaValidateDiagFunc // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. + // the Terraform user interface output. It should be used for password or + // other values which should be hidden. + // + // Terraform does not support conditional sensitivity, so if the value may + // only be sensitive in certain scenarios, a pragmatic choice will be + // necessary upfront of whether or not to always hide the value. Some + // providers may opt to split up resources based on sensitivity, to ensure + // that practitioners without sensitive values do not have values + // unnecessarily hidden. + // + // Terraform does not support passing sensitivity from configurations to + // providers. For example, if a sensitive value is configured via another + // attribute, this attribute is not marked Sensitive, and the value is used + // in this attribute value, the sensitivity is not transitive. The value + // will be displayed as normal. + // + // Sensitive values propagate when referenced in other parts of a + // configuration unless the nonsensitive() configuration function is used. + // Certain configuration usage may also expand the sensitivity. For + // example, including the sensitive value in a set may mark the whole set + // as sensitive. Any outputs containing a sensitive value must enable the + // output sensitive argument. Sensitive bool } @@ -260,7 +392,7 @@ const ( // suppress it from the plan if necessary. // // Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool +type SchemaDiffSuppressFunc func(k, oldValue, newValue string, d *ResourceData) bool // SchemaDefaultFunc is a function called to return a default value for // a field. @@ -487,11 +619,11 @@ func (m schemaMap) Data( // DeepCopy returns a copy of this schemaMap. The copy can be safely modified // without affecting the original. 
func (m *schemaMap) DeepCopy() schemaMap { - copy, err := copystructure.Config{Lock: true}.Copy(m) + copiedMap, err := copystructure.Config{Lock: true}.Copy(m) if err != nil { panic(err) } - return *copy.(*schemaMap) + return *copiedMap.(*schemaMap) } // Diff returns the diff for a resource given the schema map, @@ -522,7 +654,7 @@ func (m schemaMap) Diff( } for k, schema := range m { - err := m.diff(k, schema, result, d, false) + err := m.diff(ctx, k, schema, result, d, false) if err != nil { return nil, err } @@ -540,11 +672,16 @@ func (m schemaMap) Diff( if !result.DestroyTainted && customizeDiff != nil { mc := m.DeepCopy() rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(ctx, rd, meta); err != nil { + + logging.HelperSchemaTrace(ctx, "Calling downstream") + err := customizeDiff(ctx, rd, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + if err != nil { return nil, err } for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result, rd, false) + err := m.diff(ctx, k, mc[k], result, rd, false) if err != nil { return nil, err } @@ -571,7 +708,7 @@ func (m schemaMap) Diff( // Perform the diff again for k, schema := range m { - err := m.diff(k, schema, result2, d, false) + err := m.diff(ctx, k, schema, result2, d, false) if err != nil { return nil, err } @@ -585,7 +722,7 @@ func (m schemaMap) Diff( return nil, err } for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) + err := m.diff(ctx, k, mc[k], result2, rd, false) if err != nil { return nil, err } @@ -757,6 +894,10 @@ func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) erro } } + if v.DiffSuppressOnRefresh && v.DiffSuppressFunc == nil { + return fmt.Errorf("%s: cannot set DiffSuppressOnRefresh without DiffSuppressFunc", k) + } + if v.Type == TypeList || v.Type == TypeSet { if v.Elem == nil { return fmt.Errorf("%s: Elem must be set for lists", k) @@ -941,10 +1082,12 @@ type resourceDiffer interface { GetChange(string) (interface{}, interface{}) GetOk(string) (interface{}, bool) HasChange(string) bool + HasChanges(...string) bool Id() string } func (m schemaMap) diff( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -959,11 +1102,11 @@ func (m schemaMap) diff( case TypeBool, TypeInt, TypeFloat, TypeString: err = m.diffString(k, schema, unsupressedDiff, d, all) case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) + err = m.diffList(ctx, k, schema, unsupressedDiff, d, all) case TypeMap: err = m.diffMap(k, schema, unsupressedDiff, d, all) case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) + err = m.diffSet(ctx, k, schema, unsupressedDiff, d, all) default: err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) } @@ -980,6 +1123,7 @@ func (m schemaMap) diff( continue } + logging.HelperSchemaDebug(ctx, "Ignoring change due to DiffSuppressFunc", map[string]interface{}{logging.KeyAttributePath: attrK}) attrV = &terraform.ResourceAttrDiff{ Old: attrV.Old, New: attrV.Old, @@ -993,6 +1137,7 @@ func (m schemaMap) diff( } func (m schemaMap) diffList( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -1091,7 +1236,7 @@ func (m schemaMap) diffList( for i := 0; i < maxLen; i++ { for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) + err := m.diff(ctx, subK, schema, diff, d, all) if err != nil { return err } @@ -1107,7 +1252,7 @@ func (m schemaMap) diffList( // just diff each. 
for i := 0; i < maxLen; i++ { subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) + err := m.diff(ctx, subK, &t2, diff, d, all) if err != nil { return err } @@ -1232,6 +1377,7 @@ func (m schemaMap) diffMap( } func (m schemaMap) diffSet( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -1337,7 +1483,7 @@ func (m schemaMap) diffSet( // This is a complex resource for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) + err := m.diff(ctx, subK, schema, diff, d, true) if err != nil { return err } @@ -1351,7 +1497,7 @@ func (m schemaMap) diffSet( // This is just a primitive element, so go through each and // just diff each. subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) + err := m.diff(ctx, subK, &t2, diff, d, true) if err != nil { return err } @@ -1426,6 +1572,60 @@ func (m schemaMap) diffString( return nil } +// handleDiffSuppressOnRefresh visits each of the attributes set in "new" and, +// if the corresponding schema sets both DiffSuppressFunc and +// DiffSuppressOnRefresh, checks whether the new value is materially different +// than the old and if not it overwrites the new value with the old one, +// in-place. +func (m schemaMap) handleDiffSuppressOnRefresh(ctx context.Context, oldState, newState *terraform.InstanceState) { + if newState == nil || oldState == nil { + return // nothing to do, then + } + + // We'll populate this in the loop below only if we find at least one + // attribute which needs this analysis. + var d *ResourceData + + oldAttrs := oldState.Attributes + newAttrs := newState.Attributes + for k, newV := range newAttrs { + oldV, ok := oldAttrs[k] + if !ok { + continue // no old value to compare with + } + if newV == oldV { + continue // no change to test + } + + schemaList := addrToSchema(strings.Split(k, "."), m) + if len(schemaList) == 0 { + continue // no schema? weird, but not our responsibility to handle + } + schema := schemaList[len(schemaList)-1] + if !schema.DiffSuppressOnRefresh || schema.DiffSuppressFunc == nil { + continue // not relevant + } + + if d == nil { + // We populate "d" only on demand, to avoid the cost for most + // existing schemas where DiffSuppressOnRefresh won't be set. + var err error + d, err = m.Data(newState, nil) + if err != nil { + // Should not happen if we got far enough to be doing this + // analysis, but if it does then we'll bail out. + tfsdklog.Warn(ctx, fmt.Sprintf("schemaMap.handleDiffSuppressOnRefresh failed to construct ResourceData: %s", err)) + return + } + } + + if schema.DiffSuppressFunc(k, oldV, newV, d) { + tfsdklog.Debug(ctx, fmt.Sprintf("ignoring change of %q due to DiffSuppressFunc", k)) + newState.Attributes[k] = oldV // keep the old value, then + } + } +} + func (m schemaMap) validate( k string, schema *Schema, @@ -2048,8 +2248,19 @@ func (m schemaMap) validatePrimitive( // decode a float as an integer. 
// the config shims only use int for integral number values + // also accept a string, just as the TypeBool and TypeFloat cases do if v, ok := raw.(int); ok { decoded = v + } else if _, ok := raw.(string); ok { + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n } else { return append(diags, diag.Diagnostic{ Severity: diag.Error, @@ -2058,7 +2269,7 @@ func (m schemaMap) validatePrimitive( }) } case TypeFloat: - // Verify that we can parse this as an int + // Verify that we can parse this as a float var n float64 if err := mapstructure.WeakDecode(raw, &n); err != nil { return append(diags, diag.Diagnostic{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go index 48755c22d..b937ab6e5 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -3,12 +3,12 @@ package schema import ( "bytes" "fmt" - "github.com/google/go-cmp/cmp" "reflect" "sort" "strconv" "sync" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode" ) @@ -238,6 +238,6 @@ func (s *Set) listCode() []string { for k := range s.m { keys = append(keys, k) } - sort.Sort(sort.StringSlice(keys)) + sort.Strings(keys) return keys } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go index 741ca0ac2..c99b73846 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go @@ -6,13 +6,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool { - oldMap, err := ExpandJsonFromString(old) +func SuppressJsonDiff(k, oldValue, newValue string, d *schema.ResourceData) bool { + oldMap, err := ExpandJsonFromString(oldValue) if err != nil { return false } - newMap, err := ExpandJsonFromString(new) + newMap, err := ExpandJsonFromString(newValue) if err != nil { return false } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go index 3e8068a8c..9e4510b1e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go @@ -17,12 +17,12 @@ func MapKeyLenBetween(min, max int) schema.SchemaValidateDiagFunc { var diags diag.Diagnostics for _, key := range sortedKeys(v.(map[string]interface{})) { - len := len(key) - if len < min || len > max { + keyLen := len(key) + if keyLen < min || keyLen > max { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, Summary: "Bad map key length", - Detail: fmt.Sprintf("Map key lengths should be in the range (%d - %d): %s (length = %d)", min, max, key, len), + Detail: fmt.Sprintf("Map key lengths should be in the range (%d - %d): %s (length = %d)", min, max, key, keyLen), AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), }) } @@ -53,12 +53,12 @@ func MapValueLenBetween(min, max int) 
schema.SchemaValidateDiagFunc { continue } - len := len(val.(string)) - if len < min || len > max { + valLen := len(val.(string)) + if valLen < min || valLen > max { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, Summary: "Bad map value length", - Detail: fmt.Sprintf("Map value lengths should be in the range (%d - %d): %s => %v (length = %d)", min, max, key, val, len), + Detail: fmt.Sprintf("Map value lengths should be in the range (%d - %d): %s => %v (length = %d)", min, max, key, val, valLen), AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), }) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go index f1376c2d3..b0515c8d0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go @@ -66,8 +66,21 @@ func ToDiagFunc(validator schema.SchemaValidateFunc) schema.SchemaValidateDiagFu return func(i interface{}, p cty.Path) diag.Diagnostics { var diags diag.Diagnostics - attr := p[len(p)-1].(cty.GetAttrStep) - ws, es := validator(i, attr.Name) + // A practitioner-friendly key for any SchemaValidateFunc output. + // Generally this should be the last attribute name on the path. + // If not found for some unexpected reason, an empty string is fine + // as the diagnostic will have the full attribute path anyways. + var key string + + // Reverse search for last cty.GetAttrStep + for i := len(p) - 1; i >= 0; i-- { + if pathStep, ok := p[i].(cty.GetAttrStep); ok { + key = pathStep.Name + break + } + } + + ws, es := validator(i, key) for _, w := range ws { diags = append(diags, diag.Diagnostic{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go index c0c17d011..e739a1a1b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go @@ -138,7 +138,7 @@ func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { } for _, str := range valid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { return warnings, errors } } @@ -160,7 +160,7 @@ func StringNotInSlice(invalid []string, ignoreCase bool) schema.SchemaValidateFu } for _, str := range invalid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { errors = append(errors, fmt.Errorf("expected %s to not be any of %v, got %s", k, invalid, v)) return warnings, errors } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go index 596c5754a..d861f5a2a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go @@ -5,7 +5,6 @@ import ( testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -16,12 +15,6 @@ type testCase struct { expectedErr *regexp.Regexp } -type diagTestCase 
struct { - val interface{} - f schema.SchemaValidateDiagFunc - expectedErr *regexp.Regexp -} - func runTestCases(t testing.T, cases []testCase) { t.Helper() @@ -52,29 +45,6 @@ func matchAnyError(errs []error, r *regexp.Regexp) bool { return false } -func runDiagTestCases(t testing.T, cases []diagTestCase) { - t.Helper() - - for i, tc := range cases { - p := cty.Path{ - cty.GetAttrStep{Name: "test_property"}, - } - diags := tc.f(tc.val, p) - - if !diags.HasError() && tc.expectedErr == nil { - continue - } - - if diags.HasError() && tc.expectedErr == nil { - t.Fatalf("expected test case %d to produce no errors, got %v", i, diags) - } - - if !matchAnyDiagSummary(diags, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, diags) - } - } -} - func matchAnyDiagSummary(ds diag.Diagnostics, r *regexp.Regexp) bool { for _, d := range ds { if r.MatchString(d.Summary) { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go index df3e2620c..fde3a0199 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// IsDayOfTheWeek id a SchemaValidateFunc which tests if the provided value is of type string and a valid english day of the week +// IsDayOfTheWeek is a SchemaValidateFunc which tests if the provided value is of type string and a valid english day of the week func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { return StringInSlice([]string{ "Monday", @@ -20,7 +20,7 @@ func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { }, ignoreCase) } -// IsMonth id a SchemaValidateFunc which tests if the provided value is of type string and a valid english month +// IsMonth is a SchemaValidateFunc which tests if the provided value is of type string and a valid english month func IsMonth(ignoreCase bool) schema.SchemaValidateFunc { return StringInSlice([]string{ "January", diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go index f31d833d2..113d3d675 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -92,7 +92,6 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra "Invalid address operator", "Module address prefix must be followed by dot and then a name.", )) - break } if next != "module" { @@ -122,7 +121,6 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra "Invalid address operator", "Prefix \"module.\" must be followed by a module name.", )) - break } remain = remain[1:] step := ModuleInstanceStep{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go index e620e76a9..b96e17c58 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -85,15 +85,15 @@ func flatmapValueFromHCL2Map(m 
map[string]string, prefix string, val cty.Value) return } - len := 0 + valLen := 0 for it := val.ElementIterator(); it.Next(); { ak, av := it.Element() name := ak.AsString() flatmapValueFromHCL2Value(m, prefix+name, av) - len++ + valLen++ } if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed - m[prefix+"%"] = strconv.Itoa(len) + m[prefix+"%"] = strconv.Itoa(valLen) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go deleted file mode 100644 index 68f48da8f..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go +++ /dev/null @@ -1,85 +0,0 @@ -package hcl2shim - -import ( - "fmt" - - hcl2 "github.com/hashicorp/hcl/v2" -) - -// SingleAttrBody is a weird implementation of hcl2.Body that acts as if -// it has a single attribute whose value is the given expression. -// -// This is used to shim Resource.RawCount and Output.RawConfig to behave -// more like they do in the old HCL loader. -type SingleAttrBody struct { - Name string - Expr hcl2.Expression -} - -var _ hcl2.Body = SingleAttrBody{} - -func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - if !all { - // This should never happen because this body implementation should only - // be used by code that is aware that it's using a single-attr body. - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid attribute", - Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - return content, diags -} - -func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - var remain hcl2.Body - if all { - // If the request matched the one attribute we represent, then the - // remaining body is empty. 
- remain = hcl2.EmptyBody() - } else { - remain = b - } - return content, remain, diags -} - -func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { - ret := &hcl2.BodyContent{} - all := false - var diags hcl2.Diagnostics - - for _, attrS := range schema.Attributes { - if attrS.Name == b.Name { - attrs, _ := b.JustAttributes() - ret.Attributes = attrs - all = true - } else if attrS.Required { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Missing attribute", - Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - } - - return ret, all, diags -} - -func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { - return hcl2.Attributes{ - b.Name: { - Expr: b.Expr, - Name: b.Name, - NameRange: b.Expr.Range(), - Range: b.Expr.Range(), - }, - }, nil -} - -func (b SingleAttrBody) MissingItemRange() hcl2.Range { - return b.Expr.Range() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go new file mode 100644 index 000000000..5bce8140f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go @@ -0,0 +1,75 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + helperlogging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + testing "github.com/mitchellh/go-testing-interface" +) + +// InitContext creates SDK logger contexts when the provider is running in +// "production" (not under acceptance testing). The incoming context will +// already have the root SDK logger and root provider logger setup from +// terraform-plugin-go tf5server RPC handlers. +func InitContext(ctx context.Context) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperSchema, + // All calls are through the HelperSchema* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperSchema), + // Propagate tf_req_id, tf_rpc, etc. fields + tfsdklog.WithRootFields(), + ) + + return ctx +} + +// InitTestContext registers the terraform-plugin-log/tfsdklog test sink, +// configures the standard library log package, and creates SDK logger +// contexts. The incoming context is expected to be devoid of logging setup. +// +// The standard library log package handling is important as provider code +// under test may be using that package or another logging library outside of +// terraform-plugin-log. +func InitTestContext(ctx context.Context, t testing.T) context.Context { + helperlogging.SetOutput(t) + + ctx = tfsdklog.RegisterTestSink(ctx, t) + ctx = tfsdklog.NewRootSDKLogger(ctx, tfsdklog.WithLevelFromEnv(EnvTfLogSdk)) + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperResource, + // All calls are through the HelperResource* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperResource), + ) + ctx = TestNameContext(ctx, t.Name()) + + return ctx +} + +// TestNameContext adds the current test name to loggers. +func TestNameContext(ctx context.Context, testName string) context.Context { + ctx = tfsdklog.SubsystemWith(ctx, SubsystemHelperResource, KeyTestName, testName) + + return ctx +} + +// TestStepNumberContext adds the current test step number to loggers. 
+func TestStepNumberContext(ctx context.Context, stepNumber int) context.Context { + ctx = tfsdklog.SubsystemWith(ctx, SubsystemHelperResource, KeyTestStepNumber, stepNumber) + + return ctx +} + +// TestTerraformPathContext adds the current test Terraform CLI path to loggers. +func TestTerraformPathContext(ctx context.Context, terraformPath string) context.Context { + ctx = tfsdklog.SubsystemWith(ctx, SubsystemHelperResource, KeyTestTerraformPath, terraformPath) + + return ctx +} + +// TestWorkingDirectoryContext adds the current test working directory to loggers. +func TestWorkingDirectoryContext(ctx context.Context, workingDirectory string) context.Context { + ctx = tfsdklog.SubsystemWith(ctx, SubsystemHelperResource, KeyTestWorkingDirectory, workingDirectory) + + return ctx +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go new file mode 100644 index 000000000..db1a27a81 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go @@ -0,0 +1,24 @@ +package logging + +// Environment variables. +const ( + // EnvTfLogSdk is an environment variable that sets the logging level of + // the root SDK logger, while the provider is under test. In "production" + // usage, this environment variable is handled by terraform-plugin-go. + // + // Terraform CLI's logging must be explicitly turned on before this + // environment variable can be used to reduce the SDK logging levels. It + // cannot be used to show only SDK logging unless all other logging levels + // are turned off. + EnvTfLogSdk = "TF_LOG_SDK" + + // EnvTfLogSdkHelperResource is an environment variable that sets the logging + // level of SDK helper/resource loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperResource = "TF_LOG_SDK_HELPER_RESOURCE" + + // EnvTfLogSdkHelperSchema is an environment variable that sets the logging + // level of SDK helper/schema loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperSchema = "TF_LOG_SDK_HELPER_SCHEMA" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go new file mode 100644 index 000000000..a889601c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go @@ -0,0 +1,32 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperResource is the tfsdklog subsystem name for helper/resource. + SubsystemHelperResource = "helper_resource" +) + +// HelperResourceTrace emits a helper/resource subsystem log at TRACE level. +func HelperResourceTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceDebug emits a helper/resource subsystem log at DEBUG level. +func HelperResourceDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceWarn emits a helper/resource subsystem log at WARN level.
+func HelperResourceWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceError emits a helper/resource subsystem log at ERROR level. +func HelperResourceError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperResource, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go new file mode 100644 index 000000000..b2fe71d15 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go @@ -0,0 +1,32 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperSchema is the tfsdklog subsystem name for helper/schema. + SubsystemHelperSchema = "helper_schema" +) + +// HelperSchemaDebug emits a helper/schema subsystem log at DEBUG level. +func HelperSchemaDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaError emits a helper/schema subsystem log at ERROR level. +func HelperSchemaError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaTrace emits a helper/schema subsystem log at TRACE level. +func HelperSchemaTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaWarn emits a helper/schema subsystem log at WARN level. +func HelperSchemaWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperSchema, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go new file mode 100644 index 000000000..2ba548f61 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go @@ -0,0 +1,45 @@ +package logging + +// Structured logging keys. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +// +// Refer to the terraform-plugin-go logging keys as well, which should be +// equivalent to these when possible. +const ( + // Attribute path representation, which is typically in flatmap form such + // as parent.0.child in this project. + KeyAttributePath = "tf_attribute_path" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Underlying Go error string when logging an error. + KeyError = "error" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The name of the test being executed. + KeyTestName = "test_name" + + // The TestStep number of the test being executed. Starts at 1. 
+ KeyTestStepNumber = "test_step_number" + + // The path to the Terraform CLI logging file used for an acceptance test. + // + // This should match where the rest of the acceptance test logs are going + // already, but is provided for troubleshooting in case it does not. + KeyTestTerraformLogPath = "test_terraform_log_path" + + // The path to the Terraform CLI used for an acceptance test. + KeyTestTerraformPath = "test_terraform_path" + + // The working directory of the acceptance test. + KeyTestWorkingDirectory = "test_working_directory" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go index 252456747..e02c1e443 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -1,34 +1,61 @@ package convert import ( - "fmt" + "context" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) // AppendProtoDiag appends a new diagnostic from a warning string or an error. // This panics if d is not a string or error. -func AppendProtoDiag(diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { +func AppendProtoDiag(ctx context.Context, diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { switch d := d.(type) { case cty.PathError: ap := PathToAttributePath(d.Path) - diags = append(diags, &tfprotov5.Diagnostic{ + diagnostic := &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Error(), Attribute: ap, - }) + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for cty.PathError type") + diagnostic.Summary = "Empty Error String" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." + } + + diags = append(diags, diagnostic) case diag.Diagnostics: diags = append(diags, DiagsToProto(d)...) case error: - diags = append(diags, &tfprotov5.Diagnostic{ + if d == nil { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for nil error in AppendProtoDiag") + return diags + } + + diagnostic := &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Error(), - }) + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for error type") + diagnostic.Summary = "Error Missing Message" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." + } + + diags = append(diags, diagnostic) case string: + if d == "" { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for empty string in AppendProtoDiag") + return diags + } + diags = append(diags, &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityWarning, Summary: d, @@ -68,19 +95,18 @@ func ProtoToDiags(ds []*tfprotov5.Diagnostic) diag.Diagnostics { func DiagsToProto(diags diag.Diagnostics) []*tfprotov5.Diagnostic { var ds []*tfprotov5.Diagnostic for _, d := range diags { - if err := d.Validate(); err != nil { - panic(fmt.Errorf("Invalid diagnostic: %s. 
This is always a bug in the provider implementation", err)) - } protoDiag := &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Summary, Detail: d.Detail, Attribute: PathToAttributePath(d.AttributePath), } - if d.Severity == diag.Error { - protoDiag.Severity = tfprotov5.DiagnosticSeverityError - } else if d.Severity == diag.Warning { + if d.Severity == diag.Warning { protoDiag.Severity = tfprotov5.DiagnosticSeverityWarning } + if d.Summary == "" { + protoDiag.Summary = "Empty Summary: This is always a bug in the provider and should be reported to the provider developers." + } ds = append(ds, protoDiag) } return ds @@ -93,13 +119,13 @@ func AttributePathToPath(ap *tftypes.AttributePath) cty.Path { return p } for _, step := range ap.Steps() { - switch step.(type) { + switch step := step.(type) { case tftypes.AttributeName: - p = p.GetAttr(string(step.(tftypes.AttributeName))) + p = p.GetAttr(string(step)) case tftypes.ElementKeyString: - p = p.Index(cty.StringVal(string(step.(tftypes.ElementKeyString)))) + p = p.Index(cty.StringVal(string(step))) case tftypes.ElementKeyInt: - p = p.Index(cty.NumberIntVal(int64(step.(tftypes.ElementKeyInt)))) + p = p.Index(cty.NumberIntVal(int64(step))) } } return p diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go index c5dbf5b8f..07d0b8978 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -1,8 +1,8 @@ package convert import ( + "context" "fmt" - "log" "reflect" "sort" @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) func tftypeFromCtyType(in cty.Type) (tftypes.Type, error) { @@ -128,10 +129,10 @@ func ctyTypeFromTFType(in tftypes.Type) (cty.Type, error) { // ConfigSchemaToProto takes a *configschema.Block and converts it to a // tfprotov5.SchemaBlock for a grpc response. 
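A minimal sketch of provider-side diagnostics as DiagsToProto now handles them: the protocol severity defaults to error, warnings are mapped explicitly, and an empty Summary is replaced with a placeholder rather than triggering a panic. The messages and attribute path are illustrative assumptions.

package example

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// exampleDiagnostics sketches diagnostics a provider might return; the
// warning carries an attribute path, while the error relies on the
// default error severity applied during protocol conversion.
func exampleDiagnostics() diag.Diagnostics {
	return diag.Diagnostics{
		{
			Severity:      diag.Warning,
			Summary:       "Attribute is deprecated",
			Detail:        "Use the replacement attribute instead.",
			AttributePath: cty.Path{cty.GetAttrStep{Name: "legacy_field"}},
		},
		{
			Severity: diag.Error,
			Summary:  "Remote API rejected the request",
		},
	}
}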
-func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { +func ConfigSchemaToProto(ctx context.Context, b *configschema.Block) *tfprotov5.SchemaBlock { block := &tfprotov5.SchemaBlock{ Description: b.Description, - DescriptionKind: protoStringKind(b.DescriptionKind), + DescriptionKind: protoStringKind(ctx, b.DescriptionKind), Deprecated: b.Deprecated, } @@ -141,7 +142,7 @@ func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { attr := &tfprotov5.SchemaAttribute{ Name: name, Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), + DescriptionKind: protoStringKind(ctx, a.DescriptionKind), Optional: a.Optional, Computed: a.Computed, Required: a.Required, @@ -160,16 +161,16 @@ func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { for _, name := range sortedKeys(b.BlockTypes) { b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(ctx, name, b)) } return block } -func protoStringKind(k configschema.StringKind) tfprotov5.StringKind { +func protoStringKind(ctx context.Context, k configschema.StringKind) tfprotov5.StringKind { switch k { default: - log.Printf("[TRACE] unexpected configschema.StringKind: %d", k) + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected configschema.StringKind: %d", k)) return tfprotov5.StringKindPlain case configschema.StringPlain: return tfprotov5.StringKindPlain @@ -178,7 +179,7 @@ func protoStringKind(k configschema.StringKind) tfprotov5.StringKind { } } -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { +func protoSchemaNestedBlock(ctx context.Context, name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { var nesting tfprotov5.SchemaNestedBlockNestingMode switch b.Nesting { case configschema.NestingSingle: @@ -196,7 +197,7 @@ func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5 } return &tfprotov5.SchemaNestedBlock{ TypeName: name, - Block: ConfigSchemaToProto(&b.Block), + Block: ConfigSchemaToProto(ctx, &b.Block), Nesting: nesting, MinItems: int64(b.MinItems), MaxItems: int64(b.MaxItems), @@ -205,20 +206,20 @@ func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5 // ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it // to a terraform *configschema.Block. 
-func ProtoToConfigSchema(b *tfprotov5.SchemaBlock) *configschema.Block { +func ProtoToConfigSchema(ctx context.Context, b *tfprotov5.SchemaBlock) *configschema.Block { block := &configschema.Block{ Attributes: make(map[string]*configschema.Attribute), BlockTypes: make(map[string]*configschema.NestedBlock), Description: b.Description, - DescriptionKind: schemaStringKind(b.DescriptionKind), + DescriptionKind: schemaStringKind(ctx, b.DescriptionKind), Deprecated: b.Deprecated, } for _, a := range b.Attributes { attr := &configschema.Attribute{ Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), + DescriptionKind: schemaStringKind(ctx, a.DescriptionKind), Required: a.Required, Optional: a.Optional, Computed: a.Computed, @@ -236,16 +237,16 @@ func ProtoToConfigSchema(b *tfprotov5.SchemaBlock) *configschema.Block { } for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + block.BlockTypes[b.TypeName] = schemaNestedBlock(ctx, b) } return block } -func schemaStringKind(k tfprotov5.StringKind) configschema.StringKind { +func schemaStringKind(ctx context.Context, k tfprotov5.StringKind) configschema.StringKind { switch k { default: - log.Printf("[TRACE] unexpected tfprotov5.StringKind: %d", k) + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected tfprotov5.StringKind: %d", k)) return configschema.StringPlain case tfprotov5.StringKindPlain: return configschema.StringPlain @@ -254,7 +255,7 @@ func schemaStringKind(k tfprotov5.StringKind) configschema.StringKind { } } -func schemaNestedBlock(b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { +func schemaNestedBlock(ctx context.Context, b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { var nesting configschema.NestingMode switch b.Nesting { case tfprotov5.SchemaNestedBlockNestingModeSingle: @@ -278,7 +279,7 @@ func schemaNestedBlock(b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock MaxItems: int(b.MaxItems), } - nested := ProtoToConfigSchema(b.Block) + nested := ProtoToConfigSchema(ctx, b.Block) nb.Block = *nested return nb } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go index 2c544e9e5..c238145aa 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/hc-install/src" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) // Config is used to configure the test helper. In most normal test programs @@ -29,11 +30,11 @@ type Config struct { // DiscoverConfig uses environment variables and other means to automatically // discover a reasonable test helper configuration. 
-func DiscoverConfig(sourceDir string) (*Config, error) { - tfVersion := strings.TrimPrefix(os.Getenv("TF_ACC_TERRAFORM_VERSION"), "v") - tfPath := os.Getenv("TF_ACC_TERRAFORM_PATH") +func DiscoverConfig(ctx context.Context, sourceDir string) (*Config, error) { + tfVersion := strings.TrimPrefix(os.Getenv(EnvTfAccTerraformVersion), "v") + tfPath := os.Getenv(EnvTfAccTerraformPath) - tempDir := os.Getenv("TF_ACC_TEMP_DIR") + tempDir := os.Getenv(EnvTfAccTempDir) tfDir, err := ioutil.TempDir(tempDir, "plugintest-terraform") if err != nil { return nil, fmt.Errorf("failed to create temp dir: %w", err) @@ -42,6 +43,8 @@ func DiscoverConfig(sourceDir string) (*Config, error) { var sources []src.Source switch { case tfPath != "": + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of exact path: %s", tfPath)) + sources = append(sources, &fs.AnyVersion{ ExactBinPath: tfPath, }) @@ -52,12 +55,17 @@ func DiscoverConfig(sourceDir string) (*Config, error) { return nil, fmt.Errorf("invalid Terraform version: %w", err) } + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of releases.hashicorp.com exact version %q for installation in: %s", tfVersion, tfDir)) + sources = append(sources, &releases.ExactVersion{ InstallDir: tfDir, Product: product.Terraform, Version: tfVersion, }) default: + logging.HelperResourceTrace(ctx, "Adding potential Terraform CLI source of local filesystem PATH lookup") + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of checkpoint.hashicorp.com latest version for installation in: %s", tfDir)) + sources = append(sources, &fs.AnyVersion{ Product: &product.Terraform, }) @@ -70,9 +78,13 @@ func DiscoverConfig(sourceDir string) (*Config, error) { installer := install.NewInstaller() tfExec, err := installer.Ensure(context.Background(), sources) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to find or install Terraform CLI from %+v: %w", sources, err) } + ctx = logging.TestTerraformPathContext(ctx, tfExec) + + logging.HelperResourceDebug(ctx, "Found Terraform CLI") + return &Config{ SourceDir: sourceDir, TerraformExec: tfExec, diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go new file mode 100644 index 000000000..ac75151a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go @@ -0,0 +1,63 @@ +package plugintest + +// Environment variables +const ( + // Environment variable with acceptance testing temporary directory for + // testing files and Terraform CLI installation, if installation is + // required. By default, the operating system temporary directory is used. + // + // Setting TF_ACC_TERRAFORM_PATH does not override this value for Terraform + // CLI installation, if installation is required. + EnvTfAccTempDir = "TF_ACC_TEMP_DIR" + + // Environment variable with path to save Terraform logs during acceptance + // testing. This value sets TF_LOG_PATH in a safe manner when executing + // Terraform CLI commands, which would otherwise be ignored since it could + // interfere with how the underlying execution is performed. + // + // If TF_LOG_PATH_MASK is set, it takes precedence over this value. 
+ EnvTfAccLogPath = "TF_ACC_LOG_PATH" + + // Environment variable with path containing the string %s, which is + // replaced with the test name, to save separate Terraform logs during + // acceptance testing. This value sets TF_LOG_PATH in a safe manner when + // executing Terraform CLI commands, which would otherwise be ignored since + // it could interfere with how the underlying execution is performed. + // + // Takes precedence over TF_ACC_LOG_PATH. + EnvTfLogPathMask = "TF_LOG_PATH_MASK" + + // Environment variable with acceptance testing Terraform CLI version to + // download from releases.hashicorp.com, checksum verify, and install. The + // value can be any valid Terraform CLI version, such as 1.1.6, with or + // without a prepended v character. + // + // Setting this value takes precedence over using an available Terraform + // binary in the operating system PATH, or if not found, installing the + // latest version according to checkpoint.hashicorp.com. + // + // By default, the binary is installed in the operating system temporary + // directory, however that directory can be overridden with the + // TF_ACC_TEMP_DIR environment variable. + // + // If TF_ACC_TERRAFORM_PATH is also set, this installation method is + // only invoked when a binary does not exist at that path. No version + // checks are performed against an existing TF_ACC_TERRAFORM_PATH. + EnvTfAccTerraformVersion = "TF_ACC_TERRAFORM_VERSION" + + // Acceptance testing path to Terraform CLI binary. + // + // Setting this value takes precedence over using an available Terraform + // binary in the operating system PATH, or if not found, installing the + // latest version according to checkpoint.hashicorp.com. This value does + // not override TF_ACC_TEMP_DIR for Terraform CLI installation, if + // installation is required. + // + // If TF_ACC_TERRAFORM_VERSION is not set, the binary must exist and be + // executable, or an error will be returned. + // + // If TF_ACC_TERRAFORM_VERSION is also set, that Terraform CLI version + // will be installed if a binary is not found at the given path. No version + // checks are performed against an existing binary. + EnvTfAccTerraformPath = "TF_ACC_TERRAFORM_PATH" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go index 56eaa0c16..ad796be08 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go @@ -2,54 +2,8 @@ package plugintest import ( "fmt" - "os" - "testing" ) -// AcceptanceTest is a test guard that will produce a log and call SkipNow on -// the given TestControl if the environment variable TF_ACC isn't set to -// indicate that the caller wants to run acceptance tests. -// -// Call this immediately at the start of each acceptance test function to -// signal that it may cost money and thus requires this opt-in enviromment -// variable. -// -// For the purpose of this function, an "acceptance test" is any est that -// reaches out to services that are not directly controlled by the test program -// itself, particularly if those requests may lead to service charges. For any -// system where it is possible and realistic to run a local instance of the -// service for testing (e.g.
in a daemon launched by the test program itself), -// prefer to do this and _don't_ call AcceptanceTest, thus allowing tests to be -// run more easily and without external cost by contributors. -func AcceptanceTest(t TestControl) { - t.Helper() - if os.Getenv("TF_ACC") != "" { - t.Log("TF_ACC is not set") - t.SkipNow() - } -} - -// LongTest is a test guard that will produce a log and call SkipNow on the -// given TestControl if the test harness is currently running in "short mode". -// -// What is considered a "long test" will always be pretty subjective, but test -// implementers should think of this in terms of what seems like it'd be -// inconvenient to run repeatedly for quick feedback while testing a new feature -// under development. -// -// When testing resource types that always take several minutes to complete -// operations, consider having a single general test that covers the basic -// functionality and then mark any other more specific tests as long tests so -// that developers can quickly smoke-test a particular feature when needed -// but can still run the full set of tests for a feature when needed. -func LongTest(t TestControl) { - t.Helper() - if testing.Short() { - t.Log("skipping long test because of short mode") - t.SkipNow() - } -} - // TestControl is an interface requiring a subset of *testing.T which is used // by the test guards and helpers in this package. Most callers can simply // pass their *testing.T value here, but the interface allows other @@ -65,6 +19,7 @@ type TestControl interface { Log(args ...interface{}) FailNow() SkipNow() + Name() string } // testingT wraps a TestControl to recover some of the convenience behaviors diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go index 4b130b499..d5aecd87b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -1,16 +1,17 @@ package plugintest import ( + "context" + "errors" "fmt" "io/ioutil" "os" + "strings" "github.com/hashicorp/terraform-exec/tfexec" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) -const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" -const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" - // AutoInitProviderHelper is the main entrypoint for testing provider plugins // using this package. It is intended to be called during TestMain to prepare // for provider testing. @@ -20,8 +21,8 @@ const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" // available for upgrade tests, and then will return an object containing the // results of that initialization which can then be stored in a global variable // for use in other tests. -func AutoInitProviderHelper(sourceDir string) *Helper { - helper, err := AutoInitHelper(sourceDir) +func AutoInitProviderHelper(ctx context.Context, sourceDir string) *Helper { + helper, err := AutoInitHelper(ctx, sourceDir) if err != nil { fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) os.Exit(1) @@ -49,13 +50,13 @@ type Helper struct { // way to get the standard init behavior based on environment variables, and // callers should use this unless they have an unusual requirement that calls // for constructing a config in a different way. 
-func AutoInitHelper(sourceDir string) (*Helper, error) { - config, err := DiscoverConfig(sourceDir) +func AutoInitHelper(ctx context.Context, sourceDir string) (*Helper, error) { + config, err := DiscoverConfig(ctx, sourceDir) if err != nil { return nil, err } - return InitHelper(config) + return InitHelper(ctx, config) } // InitHelper prepares a testing helper with the given configuration. @@ -67,8 +68,8 @@ func AutoInitHelper(sourceDir string) (*Helper, error) { // If this function returns an error then it may have left some temporary files // behind in the system's temporary directory. There is currently no way to // automatically clean those up. -func InitHelper(config *Config) (*Helper, error) { - tempDir := os.Getenv("TF_ACC_TEMP_DIR") +func InitHelper(ctx context.Context, config *Config) (*Helper, error) { + tempDir := os.Getenv(EnvTfAccTempDir) baseDir, err := ioutil.TempDir(tempDir, "plugintest") if err != nil { return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) @@ -103,22 +104,68 @@ func (h *Helper) Close() error { // If the working directory object is not itself closed by the time the test // program exits, the Close method on the helper itself will attempt to // delete it. -func (h *Helper) NewWorkingDir() (*WorkingDir, error) { +func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, error) { dir, err := ioutil.TempDir(h.baseDir, "work") if err != nil { return nil, err } + ctx = logging.TestWorkingDirectoryContext(ctx, dir) + // symlink the provider source files into the config directory // e.g. testdata + logging.HelperResourceTrace(ctx, "Symlinking source directories to work directory") err = symlinkDirectoriesOnly(h.sourceDir, dir) if err != nil { return nil, err } tf, err := tfexec.NewTerraform(dir, h.terraformExec) + if err != nil { - return nil, err + return nil, fmt.Errorf("unable to create terraform-exec instance: %w", err) + } + + err = tf.SetDisablePluginTLS(true) + + if err != nil { + return nil, fmt.Errorf("unable to disable terraform-exec plugin TLS: %w", err) + } + + err = tf.SetSkipProviderVerify(true) // Only required for Terraform CLI 0.12.x + + var mismatch *tfexec.ErrVersionMismatch + if err != nil && !errors.As(err, &mismatch) { + return nil, fmt.Errorf("unable to disable terraform-exec provider verification: %w", err) + } + + var logPath, logPathEnvVar string + + if tfAccLogPath := os.Getenv(EnvTfAccLogPath); tfAccLogPath != "" { + logPath = tfAccLogPath + logPathEnvVar = EnvTfAccLogPath + } + + // Similar to helper/logging.LogOutput() and + // terraform-plugin-log/tfsdklog.RegisterTestSink(), the TF_LOG_PATH_MASK + // environment variable should take precedence over TF_ACC_LOG_PATH. 
+ if tfLogPathMask := os.Getenv(EnvTfLogPathMask); tfLogPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + logPath = fmt.Sprintf(tfLogPathMask, testName) + logPathEnvVar = EnvTfLogPathMask + } + + if logPath != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec log path via %s environment variable", logPathEnvVar), + map[string]interface{}{logging.KeyTestTerraformLogPath: logPath}, + ) + + if err := tf.SetLogPath(logPath); err != nil { + return nil, fmt.Errorf("unable to set terraform-exec log path (%s): %w", logPath, err) + } } return &WorkingDir{ @@ -132,10 +179,10 @@ func (h *Helper) NewWorkingDir() (*WorkingDir, error) { // RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl // object and will immediately fail the running test if the creation of the // working directory fails. -func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir { +func (h *Helper) RequireNewWorkingDir(ctx context.Context, t TestControl) *WorkingDir { t.Helper() - wd, err := h.NewWorkingDir() + wd, err := h.NewWorkingDir(ctx, t) if err != nil { t := testingT{t} t.Fatalf("failed to create new working directory: %s", err) @@ -144,6 +191,11 @@ func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir { return wd } +// WorkingDirectory returns the working directory being used when running tests. +func (h *Helper) WorkingDirectory() string { + return h.baseDir +} + // TerraformExecPath returns the location of the Terraform CLI executable that // should be used when running tests. func (h *Helper) TerraformExecPath() string { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go index df1cb92fd..335e217ad 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go @@ -1,20 +1,31 @@ package plugintest import ( + "fmt" "os" "path/filepath" ) -func symlinkFile(src string, dest string) (err error) { - err = os.Symlink(src, dest) - if err == nil { - srcInfo, err := os.Stat(src) - if err != nil { - err = os.Chmod(dest, srcInfo.Mode()) - } +func symlinkFile(src string, dest string) error { + err := os.Symlink(src, dest) + + if err != nil { + return fmt.Errorf("unable to symlink %q to %q: %w", src, dest, err) } - return + srcInfo, err := os.Stat(src) + + if err != nil { + return fmt.Errorf("unable to stat %q: %w", src, err) + } + + err = os.Chmod(dest, srcInfo.Mode()) + + if err != nil { + return fmt.Errorf("unable to set %q permissions: %w", dest, err) + } + + return nil } // symlinkDir is a simplistic function for recursively symlinking all files in a directory to a new path. 
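A minimal sketch of the acceptance-testing environment variables consulted by DiscoverConfig and NewWorkingDir; in practice they are exported in the shell rather than set in code, and the values shown are illustrative assumptions.

package example_test

import (
	"os"
	"testing"
)

// TestMain sketches a hypothetical test setup using the TF_ACC_* and
// TF_LOG_PATH_MASK environment variables documented above.
func TestMain(m *testing.M) {
	os.Setenv("TF_ACC_TERRAFORM_VERSION", "1.1.6")  // install this CLI version if none is found
	os.Setenv("TF_ACC_TEMP_DIR", os.TempDir())      // where the CLI is installed
	os.Setenv("TF_LOG_PATH_MASK", "/tmp/tf-%s.log") // per-test Terraform CLI log files
	os.Exit(m.Run())
}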
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go index 1faeb5c7a..1c15e6f5c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -3,7 +3,7 @@ package plugintest import ( "bytes" "context" - "errors" + "encoding/json" "fmt" "io/ioutil" "os" @@ -11,11 +11,13 @@ import ( "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) const ( - ConfigFileName = "terraform_plugin_test.tf" - PlanFileName = "tfplan" + ConfigFileName = "terraform_plugin_test.tf" + ConfigFileNameJSON = ConfigFileName + ".json" + PlanFileName = "tfplan" ) // WorkingDir represents a distinct working directory that can be used for @@ -28,6 +30,10 @@ type WorkingDir struct { // baseDir is the root of the working directory tree baseDir string + // configFilename is the full filename where the latest configuration + // was stored; empty until SetConfig is called. + configFilename string + // baseArgs is arguments that should be appended to all commands baseArgs []string @@ -40,8 +46,6 @@ type WorkingDir struct { // reattachInfo stores the gRPC socket info required for Terraform's // plugin reattach functionality reattachInfo tfexec.ReattachInfo - - env map[string]string } // Close deletes the directories and files created to represent the receiving @@ -51,20 +55,8 @@ func (wd *WorkingDir) Close() error { return os.RemoveAll(wd.baseDir) } -// Setenv sets an environment variable on the WorkingDir. -func (wd *WorkingDir) Setenv(envVar, val string) { - if wd.env == nil { - wd.env = map[string]string{} - } - wd.env[envVar] = val -} - -// Unsetenv removes an environment variable from the WorkingDir. -func (wd *WorkingDir) Unsetenv(envVar string) { - delete(wd.env, envVar) -} - -func (wd *WorkingDir) SetReattachInfo(reattachInfo tfexec.ReattachInfo) { +func (wd *WorkingDir) SetReattachInfo(ctx context.Context, reattachInfo tfexec.ReattachInfo) { + logging.HelperResourceTrace(ctx, "Setting Terraform CLI reattach configuration", map[string]interface{}{"tf_reattach_config": reattachInfo}) wd.reattachInfo = reattachInfo } @@ -82,29 +74,24 @@ func (wd *WorkingDir) GetHelper() *Helper { // This must be called at least once before any call to Init, Plan, Apply, or // Destroy to establish the configuration. Any previously-set configuration is // discarded and any saved plan is cleared. 
-func (wd *WorkingDir) SetConfig(cfg string) error { - configFilename := filepath.Join(wd.baseDir, ConfigFileName) - err := ioutil.WriteFile(configFilename, []byte(cfg), 0700) - if err != nil { - return err +func (wd *WorkingDir) SetConfig(ctx context.Context, cfg string) error { + outFilename := filepath.Join(wd.baseDir, ConfigFileName) + rmFilename := filepath.Join(wd.baseDir, ConfigFileNameJSON) + bCfg := []byte(cfg) + if json.Valid(bCfg) { + outFilename, rmFilename = rmFilename, outFilename } - - var mismatch *tfexec.ErrVersionMismatch - err = wd.tf.SetDisablePluginTLS(true) - if err != nil && !errors.As(err, &mismatch) { - return err + if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to remove %q: %w", rmFilename, err) } - err = wd.tf.SetSkipProviderVerify(true) - if err != nil && !errors.As(err, &mismatch) { + err := ioutil.WriteFile(outFilename, bCfg, 0700) + if err != nil { return err } - - if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { - wd.tf.SetLogPath(p) - } + wd.configFilename = outFilename // Changing configuration invalidates any saved plan. - err = wd.ClearPlan() + err = wd.ClearPlan(ctx) if err != nil { return err } @@ -115,35 +102,66 @@ func (wd *WorkingDir) SetConfig(cfg string) error { // // Any remote objects tracked by the state are not destroyed first, so this // will leave them dangling in the remote system. -func (wd *WorkingDir) ClearState() error { +func (wd *WorkingDir) ClearState(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform state") + err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform state to clear") return nil } - return err + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform state") + + return nil } // ClearPlan deletes any saved plan present in the working directory. -func (wd *WorkingDir) ClearPlan() error { +func (wd *WorkingDir) ClearPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform plan") + err := os.Remove(wd.planFilename()) + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform plan to clear") return nil } - return err + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform plan") + + return nil } +var errWorkingDirSetConfigNotCalled = fmt.Errorf("must call SetConfig before Init") + // Init runs "terraform init" for the given working directory, forcing Terraform // to use the current version of the plugin under test. -func (wd *WorkingDir) Init() error { - if _, err := os.Stat(wd.configFilename()); err != nil { - return fmt.Errorf("must call SetConfig before Init") +func (wd *WorkingDir) Init(ctx context.Context) error { + if wd.configFilename == "" { + return errWorkingDirSetConfigNotCalled + } + if _, err := os.Stat(wd.configFilename); err != nil { + return errWorkingDirSetConfigNotCalled } - return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) -} + logging.HelperResourceTrace(ctx, "Calling Terraform CLI init command") + + // -upgrade=true is required for per-TestStep provider version changes + // e.g. 
TestTest_TestStep_ExternalProviders_DifferentVersions + err := wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Upgrade(true)) -func (wd *WorkingDir) configFilename() string { - return filepath.Join(wd.baseDir, ConfigFileName) + logging.HelperResourceTrace(ctx, "Called Terraform CLI init command") + + return err } func (wd *WorkingDir) planFilename() string { @@ -152,15 +170,25 @@ func (wd *WorkingDir) planFilename() string { // CreatePlan runs "terraform plan" to create a saved plan file, which if successful // will then be used for the next call to Apply. -func (wd *WorkingDir) CreatePlan() error { +func (wd *WorkingDir) CreatePlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan command") + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan command") + return err } // CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan // file, which if successful will then be used for the next call to Apply. -func (wd *WorkingDir) CreateDestroyPlan() error { +func (wd *WorkingDir) CreateDestroyPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan -destroy command") + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan -destroy command") + return err } @@ -168,13 +196,19 @@ func (wd *WorkingDir) CreateDestroyPlan() error { // successfully and the saved plan has not been cleared in the meantime then // this will apply the saved plan. Otherwise, it will implicitly create a new // plan and apply it. -func (wd *WorkingDir) Apply() error { +func (wd *WorkingDir) Apply(ctx context.Context) error { args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} if wd.HasSavedPlan() { args = append(args, tfexec.DirOrPlan(PlanFileName)) } - return wd.tf.Apply(context.Background(), args...) + logging.HelperResourceTrace(ctx, "Calling Terraform CLI apply command") + + err := wd.tf.Apply(context.Background(), args...) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI apply command") + + return err } // Destroy runs "terraform destroy". It does not consider or modify any saved @@ -182,8 +216,14 @@ func (wd *WorkingDir) Apply() error { // // If destroy fails then remote objects might still exist, and continue to // exist after a particular test is concluded. -func (wd *WorkingDir) Destroy() error { - return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) +func (wd *WorkingDir) Destroy(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI destroy command") + + err := wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI destroy command") + + return err } // HasSavedPlan returns true if there is a saved plan in the working directory. If @@ -197,19 +237,25 @@ func (wd *WorkingDir) HasSavedPlan() bool { // // If no plan is saved or if the plan file cannot be read, SavedPlan returns // an error. 
-func (wd *WorkingDir) SavedPlan() (*tfjson.Plan, error) { +func (wd *WorkingDir) SavedPlan(ctx context.Context) (*tfjson.Plan, error) { if !wd.HasSavedPlan() { return nil, fmt.Errorf("there is no current saved plan") } - return wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + logging.HelperResourceTrace(ctx, "Calling Terraform CLI apply command") + + plan, err := wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI apply command") + + return plan, err } // SavedPlanRawStdout returns a human readable stdout capture of the current saved plan file, if any. // // If no plan is saved or if the plan file cannot be read, SavedPlanRawStdout returns // an error. -func (wd *WorkingDir) SavedPlanRawStdout() (string, error) { +func (wd *WorkingDir) SavedPlanRawStdout(ctx context.Context) (string, error) { if !wd.HasSavedPlan() { return "", fmt.Errorf("there is no current saved plan") } @@ -218,7 +264,13 @@ func (wd *WorkingDir) SavedPlanRawStdout() (string, error) { wd.tf.SetStdout(&ret) defer wd.tf.SetStdout(ioutil.Discard) + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command") + _, err := wd.tf.ShowPlanFileRaw(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command") + if err != nil { return "", err } @@ -230,23 +282,47 @@ func (wd *WorkingDir) SavedPlanRawStdout() (string, error) { // // If the state cannot be read, State returns an error. -func (wd *WorkingDir) State() (*tfjson.State, error) { - return wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) +func (wd *WorkingDir) State(ctx context.Context) (*tfjson.State, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command") + + state, err := wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command") + + return state, err } // Import runs terraform import -func (wd *WorkingDir) Import(resource, id string) error { - return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) +func (wd *WorkingDir) Import(ctx context.Context, resource, id string) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI import command") + + err := wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI import command") + + return err } // Refresh runs terraform refresh -func (wd *WorkingDir) Refresh() error { - return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate"))) +func (wd *WorkingDir) Refresh(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI refresh command") + + err := wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI refresh command") + + return err } // Schemas returns an object describing the provider schemas. // // If the schemas cannot be read, Schemas returns an error. 
-func (wd *WorkingDir) Schemas() (*tfjson.ProviderSchemas, error) { - return wd.tf.ProvidersSchema(context.Background()) +func (wd *WorkingDir) Schemas(ctx context.Context) (*tfjson.ProviderSchemas, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI providers schema command") + + providerSchemas, err := wd.tf.ProvidersSchema(context.Background()) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI providers schema command") + + return providerSchemas, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go index 171451ca7..e1875e134 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go @@ -2,13 +2,7 @@ package plugin import ( "context" - "encoding/json" "errors" - "fmt" - "os" - "os/signal" - "runtime" - "strings" "time" "github.com/hashicorp/go-plugin" @@ -37,6 +31,10 @@ func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan st reattachCh := make(chan *plugin.ReattachConfig) closeCh := make(chan struct{}) + if opts == nil { + return ReattachConfig{}, closeCh, errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") + } + opts.TestConfig = &plugin.ServeTestConfig{ Context: ctx, ReattachConfigCh: reattachCh, @@ -71,48 +69,20 @@ func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan st // Debug starts a debug server and controls its lifecycle, printing the // information needed for Terraform to connect to the provider to stdout. // os.Interrupt will be captured and used to stop the server. +// +// Deprecated: Use the Serve function with the ServeOpts Debug field instead. 
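For illustration only, a minimal sketch of the replacement pattern the deprecation note above points at: serving the provider through Serve with the new ServeOpts Debug field rather than calling Debug directly. The New constructor and the provider address are hypothetical stand-ins; only ServeOpts, ProviderAddr, ProviderFunc, and Debug come from this package.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
)

// New is a hypothetical stand-in for the provider's own constructor.
func New() *schema.Provider { return &schema.Provider{} }

func main() {
	plugin.Serve(&plugin.ServeOpts{
		ProviderAddr: "registry.terraform.io/example/example", // hypothetical address
		ProviderFunc: New,
		// Debug replaces the deprecated Debug helper; Serve then manages the
		// debug/reattach lifecycle itself.
		Debug: true,
	})
}
```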
func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { - ctx, cancel := context.WithCancel(ctx) - // Ctrl-C will stop the server - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer func() { - signal.Stop(sigCh) - cancel() - }() - config, closeCh, err := DebugServe(ctx, opts) - if err != nil { - return fmt.Errorf("Error launching debug server: %w", err) - } - go func() { - select { - case <-sigCh: - cancel() - case <-ctx.Done(): - } - }() - reattachBytes, err := json.Marshal(map[string]ReattachConfig{ - providerAddr: config, - }) - if err != nil { - return fmt.Errorf("Error building reattach string: %w", err) + if opts == nil { + return errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") } - reattachStr := string(reattachBytes) - - fmt.Printf("Provider started, to attach Terraform set the TF_REATTACH_PROVIDERS env var:\n\n") - switch runtime.GOOS { - case "windows": - fmt.Printf("\tCommand Prompt:\tset \"TF_REATTACH_PROVIDERS=%s\"\n", reattachStr) - fmt.Printf("\tPowerShell:\t$env:TF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `''`)) - case "linux", "darwin": - fmt.Printf("\tTF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) - default: - fmt.Println(reattachStr) + opts.Debug = true + + if opts.ProviderAddr == "" { + opts.ProviderAddr = providerAddr } - fmt.Println("") - // wait for the server to be done - <-closeCh + Serve(opts) + return nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go index ac126642d..88975da2c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -1,12 +1,12 @@ package plugin import ( + "errors" "log" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" testing "github.com/mitchellh/go-testing-interface" - "google.golang.org/grpc" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" @@ -18,10 +18,16 @@ import ( const ( // The constants below are the names of the plugins that can be dispensed // from the plugin server. + // + // Deprecated: This is no longer used, but left for backwards compatibility + // since it is exported. It will be removed in the next major version. ProviderPluginName = "provider" ) // Handshake is the HandshakeConfig used to configure clients and servers. +// +// Deprecated: This is no longer used, but left for backwards compatibility +// since it is exported. It will be removed in the next major version. var Handshake = plugin.HandshakeConfig{ // The magic cookie values should NEVER be changed. MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", @@ -45,11 +51,20 @@ type ServeOpts struct { // Logger is the logger that go-plugin will use. Logger hclog.Logger + // Debug starts a debug server and controls its lifecycle, printing the + // information needed for Terraform to connect to the provider to stdout. + // os.Interrupt will be captured and used to stop the server. + // + // This option cannot be combined with TestConfig. + Debug bool + // TestConfig should only be set when the provider is being tested; it // will opt out of go-plugin's lifecycle management and other features, // and will use the supplied configuration options to control the // plugin's lifecycle and communicate connection information. 
See the // go-plugin GoDoc for more information. + // + // This option cannot be combined with Debug. TestConfig *plugin.ServeTestConfig // Set NoLogOutputOverride to not override the log output with an hclog @@ -69,6 +84,11 @@ type ServeOpts struct { // Serve serves a plugin. This function never returns and should be the final // function called in the main function of the plugin. func Serve(opts *ServeOpts) { + if opts.Debug && opts.TestConfig != nil { + log.Printf("[ERROR] Error starting provider: cannot set both Debug and TestConfig") + return + } + if !opts.NoLogOutputOverride { // In order to allow go-plugin to correctly pass log-levels through to // terraform, we need to use an hclog.Logger with JSON output. We can @@ -84,65 +104,125 @@ func Serve(opts *ServeOpts) { log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) } - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims. - if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + if opts.ProviderAddr == "" { + opts.ProviderAddr = "provider" + } + + var err error + + switch { + case opts.ProviderFunc != nil && opts.GRPCProviderFunc == nil: opts.GRPCProviderFunc = func() tfprotov5.ProviderServer { return schema.NewGRPCProviderServer(opts.ProviderFunc()) } + err = tf5serverServe(opts) + case opts.GRPCProviderFunc != nil: + err = tf5serverServe(opts) + case opts.GRPCProviderV6Func != nil: + err = tf6serverServe(opts) + default: + err = errors.New("no provider server defined in ServeOpts") } - serveConfig := plugin.ServeConfig{ - HandshakeConfig: Handshake, - GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { - return grpc.NewServer(opts...) - }, - Logger: opts.Logger, - Test: opts.TestConfig, + if err != nil { + log.Printf("[ERROR] Error starting provider: %s", err) } +} - // assume we have either a v5 or a v6 provider - if opts.GRPCProviderFunc != nil { - provider := opts.GRPCProviderFunc() - addr := opts.ProviderAddr - if addr == "" { - addr = "provider" - } - serveConfig.VersionedPlugins = map[int]plugin.PluginSet{ - 5: { - ProviderPluginName: &tf5server.GRPCProviderPlugin{ - GRPCProvider: func() tfprotov5.ProviderServer { - return provider - }, - Name: addr, - }, - }, - } - if opts.UseTFLogSink != nil { - serveConfig.VersionedPlugins[5][ProviderPluginName].(*tf5server.GRPCProviderPlugin).Opts = append(serveConfig.VersionedPlugins[5][ProviderPluginName].(*tf5server.GRPCProviderPlugin).Opts, tf5server.WithLoggingSink(opts.UseTFLogSink)) - } +func tf5serverServe(opts *ServeOpts) error { + var tf5serveOpts []tf5server.ServeOpt - } else if opts.GRPCProviderV6Func != nil { - provider := opts.GRPCProviderV6Func() - addr := opts.ProviderAddr - if addr == "" { - addr = "provider" - } - serveConfig.VersionedPlugins = map[int]plugin.PluginSet{ - 6: { - ProviderPluginName: &tf6server.GRPCProviderPlugin{ - GRPCProvider: func() tfprotov6.ProviderServer { - return provider - }, - Name: addr, - }, - }, - } - if opts.UseTFLogSink != nil { - serveConfig.VersionedPlugins[6][ProviderPluginName].(*tf6server.GRPCProviderPlugin).Opts = append(serveConfig.VersionedPlugins[6][ProviderPluginName].(*tf6server.GRPCProviderPlugin).Opts, tf6server.WithLoggingSink(opts.UseTFLogSink)) - } + if opts.Debug { + tf5serveOpts = append(tf5serveOpts, tf5server.WithManagedDebug()) + } + + if opts.Logger != nil { + tf5serveOpts = append(tf5serveOpts, tf5server.WithGoPluginLogger(opts.Logger)) + } + + if opts.TestConfig != nil { + // Convert send-only 
channels to bi-directional channels to appease + // the compiler. WithDebug is errantly defined to require + // bi-directional when send-only is actually needed, which may be + // fixed in the future so the opts.TestConfig channels can be passed + // through directly. + closeCh := make(chan struct{}) + reattachConfigCh := make(chan *plugin.ReattachConfig) + + go func() { + // Always forward close channel receive, since its signaling that + // the channel is closed. + val := <-closeCh + opts.TestConfig.CloseCh <- val + }() + + go func() { + val, ok := <-reattachConfigCh + + if ok { + opts.TestConfig.ReattachConfigCh <- val + } + }() + + tf5serveOpts = append(tf5serveOpts, tf5server.WithDebug( + opts.TestConfig.Context, + reattachConfigCh, + closeCh), + ) + } + + if opts.UseTFLogSink != nil { + tf5serveOpts = append(tf5serveOpts, tf5server.WithLoggingSink(opts.UseTFLogSink)) + } + + return tf5server.Serve(opts.ProviderAddr, opts.GRPCProviderFunc, tf5serveOpts...) +} + +func tf6serverServe(opts *ServeOpts) error { + var tf6serveOpts []tf6server.ServeOpt + + if opts.Debug { + tf6serveOpts = append(tf6serveOpts, tf6server.WithManagedDebug()) + } + + if opts.Logger != nil { + tf6serveOpts = append(tf6serveOpts, tf6server.WithGoPluginLogger(opts.Logger)) + } + + if opts.TestConfig != nil { + // Convert send-only channels to bi-directional channels to appease + // the compiler. WithDebug is errantly defined to require + // bi-directional when send-only is actually needed, which may be + // fixed in the future so the opts.TestConfig channels can be passed + // through directly. + closeCh := make(chan struct{}) + reattachConfigCh := make(chan *plugin.ReattachConfig) + + go func() { + // Always forward close channel receive, since its signaling that + // the channel is closed. + val := <-closeCh + opts.TestConfig.CloseCh <- val + }() + + go func() { + val, ok := <-reattachConfigCh + + if ok { + opts.TestConfig.ReattachConfigCh <- val + } + }() + + tf6serveOpts = append(tf6serveOpts, tf6server.WithDebug( + opts.TestConfig.Context, + reattachConfigCh, + closeCh), + ) + } + if opts.UseTFLogSink != nil { + tf6serveOpts = append(tf6serveOpts, tf6server.WithLoggingSink(opts.UseTFLogSink)) } - plugin.Serve(&serveConfig) + return tf6server.Serve(opts.ProviderAddr, opts.GRPCProviderV6Func, tf6serveOpts...) 
} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go index 0511a1fc5..8a2940f31 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -20,7 +20,7 @@ import ( type diffChangeType byte const ( - diffInvalid diffChangeType = iota + diffInvalid diffChangeType = iota //nolint:deadcode,varcheck diffNone diffCreate diffUpdate diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go index 11b63de8a..703547745 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -154,13 +154,13 @@ func (c *ResourceConfig) DeepCopy() *ResourceConfig { } // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) + copiedConfig, err := copystructure.Config{Lock: true}.Copy(c) if err != nil { panic(err) } // Force the type - result := copy.(*ResourceConfig) + result := copiedConfig.(*ResourceConfig) return result } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go index 7f8655ee7..170e20cd0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -366,11 +366,11 @@ func (s *State) Remove(addr ...string) error { switch v := r.Value.(type) { case *ModuleState: - s.removeModule(path, v) + s.removeModule(v) case *ResourceState: s.removeResource(path, v) case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + s.removeInstance(r.Parent.Value.(*ResourceState), v) default: return fmt.Errorf("unknown type to delete: %T", r.Value) } @@ -384,7 +384,7 @@ func (s *State) Remove(addr ...string) error { return nil } -func (s *State) removeModule(path []string, v *ModuleState) { +func (s *State) removeModule(v *ModuleState) { for i, m := range s.Modules { if m == v { s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil @@ -412,7 +412,7 @@ func (s *State) removeResource(path []string, v *ResourceState) { } } -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { +func (s *State) removeInstance(r *ResourceState, v *InstanceState) { // Go through the resource and find the instance that matches this // (if any) and remove it. 
@@ -421,20 +421,6 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState r.Primary = nil return } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } } // RootModule returns the ModuleState for the root module @@ -562,12 +548,12 @@ func (s *State) DeepCopy() *State { return nil } - copy, err := copystructure.Config{Lock: true}.Copy(s) + copiedState, err := copystructure.Config{Lock: true}.Copy(s) if err != nil { panic(err) } - return copy.(*State) + return copiedState.(*State) } func (s *State) Init() { @@ -1023,7 +1009,7 @@ func (m *ModuleState) String() string { } if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + buf.WriteString("\n Dependencies:\n") for _, dep := range rs.Dependencies { buf.WriteString(fmt.Sprintf(" %s\n", dep)) } @@ -1237,11 +1223,7 @@ func (s *ResourceState) Equal(other *ResourceState) bool { } // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true + return s.Primary.Equal(other.Primary) } // Taint marks a resource as tainted. @@ -1430,12 +1412,12 @@ func (s *InstanceState) Set(from *InstanceState) { } func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) + copiedState, err := copystructure.Config{Lock: true}.Copy(s) if err != nil { panic(err) } - return copy.(*InstanceState) + return copiedState.(*InstanceState) } func (s *InstanceState) Empty() bool { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go index 01d039272..d2229f8ce 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -24,7 +24,15 @@ type stateFilter struct { // parseResourceAddress. func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { // Parse all the addresses - as := make([]*resourceAddress, len(fs)) + var as []*resourceAddress + + if len(fs) == 0 { + // If we weren't given any filters, then we list all + as = []*resourceAddress{{Index: -1}} + } else { + as = make([]*resourceAddress, len(fs)) + } + for i, v := range fs { a, err := parseResourceAddress(v) if err != nil { @@ -34,11 +42,6 @@ func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { as[i] = a } - // If we weren't given any filters, then we list all - if len(fs) == 0 { - as = append(as, &resourceAddress{Index: -1}) - } - // Filter each of the address. We keep track of this in a map to // strip duplicates. 
resultSet := make(map[string]*stateFilterResult) diff --git a/vendor/github.com/hashicorp/terraform-registry-address/.go-version b/vendor/github.com/hashicorp/terraform-registry-address/.go-version index 42cf0675c..adc97d8e2 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/.go-version +++ b/vendor/github.com/hashicorp/terraform-registry-address/.go-version @@ -1 +1 @@ -1.15.2 +1.18 diff --git a/vendor/github.com/hashicorp/terraform-registry-address/README.md b/vendor/github.com/hashicorp/terraform-registry-address/README.md index 2ed8398da..091ff7014 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/README.md +++ b/vendor/github.com/hashicorp/terraform-registry-address/README.md @@ -8,7 +8,9 @@ The most common source of these addresses outside of Terraform Core is JSON representation of state, plan, or schemas as obtained via [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec). -## Example +## Parsing Provider Addresses + +### Example ```go p, err := ParseRawProviderSourceString("hashicorp/aws") @@ -23,7 +25,7 @@ if err != nil { // } ``` -## Legacy address +### Legacy address A legacy address is by itself (without more context) ambiguous. For example `aws` may represent either the official `hashicorp/aws` @@ -36,7 +38,7 @@ the address was produced by an affected version. If you do not have that context you should parse the string via `ParseRawProviderSourceString` and then check `addr.IsLegacy()`. -### What to do with a legacy address? +#### What to do with a legacy address? Ask the Registry API whether and where the provider was moved to @@ -70,7 +72,7 @@ If you cache results (which you should), ensure you have invalidation mechanism in place because target (migrated) namespace may change. Hard-coding migrations anywhere in code is strongly discouraged. -### `terraform` provider +#### `terraform` provider Like any other legacy address `terraform` is also ambiguous. Such address may (most unlikely) represent a custom-built provider called `terraform`, @@ -86,3 +88,24 @@ i.e. assume all of its logic including schema is contained within Terraform Core. In such case you should just use `NewBuiltInProvider("terraform")`. 
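As a brief sketch of the legacy-address handling described above (the input string is hypothetical; only ParseRawProviderSourceString and IsLegacy are taken from this package):

```go
addr, err := ParseRawProviderSourceString("aws")
if err != nil {
	// deal with error
}

if addr.IsLegacy() {
	// Ambiguous legacy address: consult the Registry API to learn which
	// namespace the provider actually belongs to before assuming one.
}
```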
+ +## Parsing Module Addresses + +### Example + +```go +registry, err := ParseRawModuleSourceRegistry("hashicorp/subnets/cidr") +if err != nil { + // deal with error +} + +// registry == ModuleSourceRegistry{ +// PackageAddr: ModuleRegistryPackage{ +// Host: svchost.Hostname("registry.terraform.io"), +// Namespace: "hashicorp", +// Name: "subnets", +// TargetSystem: "cidr", +// }, +// Subdir: "", +// }, +``` diff --git a/vendor/github.com/hashicorp/terraform-registry-address/go.mod b/vendor/github.com/hashicorp/terraform-registry-address/go.mod index a82603ea5..44328056f 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/go.mod +++ b/vendor/github.com/hashicorp/terraform-registry-address/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-registry-address go 1.14 require ( - github.com/google/go-cmp v0.5.5 + github.com/google/go-cmp v0.5.8 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 golang.org/x/net v0.0.0-20210119194325-5f4716e94777 ) diff --git a/vendor/github.com/hashicorp/terraform-registry-address/go.sum b/vendor/github.com/hashicorp/terraform-registry-address/go.sum index 85038dfa9..83124050c 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/go.sum +++ b/vendor/github.com/hashicorp/terraform-registry-address/go.sum @@ -1,20 +1,16 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -36,8 +32,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module.go b/vendor/github.com/hashicorp/terraform-registry-address/module.go new file mode 100644 index 000000000..f90279ec2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/module.go @@ -0,0 +1,241 @@ +package tfaddr + +import ( + "fmt" + "path" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// ModuleSourceRegistry is representing a module listed in a Terraform module +// registry. +type ModuleSourceRegistry struct { + // PackageAddr is the registry package that the target module belongs to. + // The module installer must translate this into a ModuleSourceRemote + // using the registry API and then take that underlying address's + // PackageAddr in order to find the actual package location. + PackageAddr ModuleRegistryPackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package that the registry address eventually resolves to. + // This will ultimately become the suffix of the Subdir of the + // ModuleSourceRemote that the registry address translates to. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. + Subdir string +} + +// DefaultModuleRegistryHost is the hostname used for registry-based module +// source addresses that do not have an explicit hostname. +const DefaultModuleRegistryHost = svchost.Hostname("registry.terraform.io") + +var moduleRegistryNamePattern = regexp.MustCompile("^[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?$") +var moduleRegistryTargetSystemPattern = regexp.MustCompile("^[0-9a-z]{1,64}$") + +// ParseRawModuleSourceRegistry only accepts module registry addresses, and +// will reject any other address type. +func ParseRawModuleSourceRegistry(raw string) (ModuleSourceRegistry, error) { + var err error + + var subDir string + raw, subDir = splitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return ModuleSourceRegistry{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + parts := strings.Split(raw, "/") + // A valid registry address has either three or four parts, because the + // leading hostname part is optional. + if len(parts) != 3 && len(parts) != 4 { + return ModuleSourceRegistry{}, fmt.Errorf("a module registry source address must have either three or four slash-separated components") + } + + host := DefaultModuleRegistryHost + if len(parts) == 4 { + host, err = svchost.ForComparison(parts[0]) + if err != nil { + // The svchost library doesn't produce very good error messages to + // return to an end-user, so we'll use some custom ones here. 
+ switch { + case strings.Contains(parts[0], "--"): + // Looks like possibly punycode, which we don't allow here + // to ensure that source addresses are written readably. + return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname %q; internationalized domain names must be given as direct unicode characters, not in punycode", parts[0]) + default: + return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname %q", parts[0]) + } + } + if !strings.Contains(host.String(), ".") { + return ModuleSourceRegistry{}, fmt.Errorf("invalid module registry hostname: must contain at least one dot") + } + // Discard the hostname prefix now that we've processed it + parts = parts[1:] + } + + ret := ModuleSourceRegistry{ + PackageAddr: ModuleRegistryPackage{ + Host: host, + }, + + Subdir: subDir, + } + + if host == svchost.Hostname("github.com") || host == svchost.Hostname("bitbucket.org") { + return ret, fmt.Errorf("can't use %q as a module registry host, because it's reserved for installing directly from version control repositories", host) + } + + if ret.PackageAddr.Namespace, err = parseModuleRegistryName(parts[0]); err != nil { + if strings.Contains(parts[0], ".") { + // Seems like the user omitted one of the latter components in + // an address with an explicit hostname. + return ret, fmt.Errorf("source address must have three more components after the hostname: the namespace, the name, and the target system") + } + return ret, fmt.Errorf("invalid namespace %q: %s", parts[0], err) + } + if ret.PackageAddr.Name, err = parseModuleRegistryName(parts[1]); err != nil { + return ret, fmt.Errorf("invalid module name %q: %s", parts[1], err) + } + if ret.PackageAddr.TargetSystem, err = parseModuleRegistryTargetSystem(parts[2]); err != nil { + if strings.Contains(parts[2], "?") { + // The user was trying to include a query string, probably? + return ret, fmt.Errorf("module registry addresses may not include a query string portion") + } + return ret, fmt.Errorf("invalid target system %q: %s", parts[2], err) + } + + return ret, nil +} + +// parseModuleRegistryName validates and normalizes a string in either the +// "namespace" or "name" position of a module registry source address. +func parseModuleRegistryName(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. Unfortunately we didn't end up defining + // these exactly equivalently: provider names can only use dashes as + // punctuation, whereas module names can use underscores. So here we're + // using some regular expressions from the original module source + // implementation, rather than using the IDNA rules as we do in + // ParseProviderPart. + + if !moduleRegistryNamePattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// parseModuleRegistryTargetSystem validates and normalizes a string in the +// "target system" position of a module registry source address. 
This is +// what we historically called "provider" but never actually enforced as +// being a provider address, and now _cannot_ be a provider address because +// provider addresses have three slash-separated components of their own. +func parseModuleRegistryTargetSystem(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. Unfortunately we didn't end up defining + // these exactly equivalently: provider names can't use dashes or + // underscores. So here we're using some regular expressions from the + // original module source implementation, rather than using the IDNA rules + // as we do in ParseProviderPart. + + if !moduleRegistryTargetSystemPattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 ASCII letters or digits") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// String returns a full representation of the address, including any +// additional components that are typically implied by omission in +// user-written addresses. +// +// We typically use this longer representation in error message, in case +// the inclusion of normally-omitted components is helpful in debugging +// unexpected behavior. +func (s ModuleSourceRegistry) String() string { + if s.Subdir != "" { + return s.PackageAddr.String() + "//" + s.Subdir + } + return s.PackageAddr.String() +} + +// ForDisplay is similar to String but instead returns a representation of +// the idiomatic way to write the address in configuration, omitting +// components that are commonly just implied in addresses written by +// users. +// +// We typically use this shorter representation in informational messages, +// such as the note that we're about to start downloading a package. +func (s ModuleSourceRegistry) ForDisplay() string { + if s.Subdir != "" { + return s.PackageAddr.ForDisplay() + "//" + s.Subdir + } + return s.PackageAddr.ForDisplay() +} + +// splitPackageSubdir detects whether the given address string has a +// subdirectory portion, and if so returns a non-empty subDir string +// along with the trimmed package address. +// +// If the given string doesn't have a subdirectory portion then it'll +// just be returned verbatim in packageAddr, with an empty subDir value. +func splitPackageSubdir(given string) (packageAddr, subDir string) { + packageAddr, subDir = sourceDirSubdir(given) + if subDir != "" { + subDir = path.Clean(subDir) + } + return packageAddr, subDir +} + +// sourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +func sourceDirSubdir(src string) (string, string) { + // URL might contains another url in query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme + // as the dir. 
+ var offset int + if idx := strings.Index(src[:stop], "://"); idx > -1 { + offset = idx + 3 + } + + // First see if we even have an explicit subdir + idx := strings.Index(src[offset:stop], "//") + if idx == -1 { + return src, "" + } + + idx += offset + subdir := src[idx+2:] + src = src[:idx] + + // Next, check if we have query parameters and push them onto the + // URL. + if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module_package.go b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go new file mode 100644 index 000000000..cba03a7fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go @@ -0,0 +1,87 @@ +package tfaddr + +import ( + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// A ModulePackage represents a physical location where Terraform can retrieve +// a module package, which is an archive, repository, or other similar +// container which delivers the source code for one or more Terraform modules. +// +// A ModulePackage is a string in go-getter's address syntax. By convention, +// we use ModulePackage-typed values only for the result of successfully +// running the go-getter "detectors", which produces an address string which +// includes an explicit installation method prefix along with an address +// string in the format expected by that installation method. +// +// Note that although the "detector" phase of go-getter does do some simple +// normalization in certain cases, it isn't generally possible to compare +// two ModulePackage values to decide if they refer to the same package. Two +// equal ModulePackage values represent the same package, but there might be +// other non-equal ModulePackage values that also refer to that package, and +// there is no reliable way to determine that. +// +// Don't convert a user-provided string directly to ModulePackage. Instead, +// use ParseModuleSource with a remote module address and then access the +// ModulePackage value from the result, making sure to also handle the +// selected subdirectory if any. You should convert directly to ModulePackage +// only for a string that is hard-coded into the program (e.g. in a unit test) +// where you've ensured that it's already in the expected syntax. +type ModulePackage string + +func (p ModulePackage) String() string { + return string(p) +} + +// A ModuleRegistryPackage is an extra indirection over a ModulePackage where +// we use a module registry to translate a more symbolic address (and +// associated version constraint given out of band) into a physical source +// location. +// +// ModuleRegistryPackage is distinct from ModulePackage because they have +// disjoint use-cases: registry package addresses are only used to query a +// registry in order to find a real module package address. These being +// distinct is intended to help future maintainers more easily follow the +// series of steps in the module installer, with the help of the type checker. 
+type ModuleRegistryPackage struct { + Host svchost.Hostname + Namespace string + Name string + TargetSystem string +} + +func (s ModuleRegistryPackage) String() string { + // Note: we're using the "display" form of the hostname here because + // for our service hostnames "for display" means something different: + // it means to render non-ASCII characters directly as Unicode + // characters, rather than using the "punycode" representation we + // use for internal processing, and so the "display" representation + // is actually what users would write in their configurations. + return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol() +} + +func (s ModuleRegistryPackage) ForDisplay() string { + if s.Host == DefaultModuleRegistryHost { + return s.ForRegistryProtocol() + } + return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol() +} + +// ForRegistryProtocol returns a string representation of just the namespace, +// name, and target system portions of the address, always omitting the +// registry hostname and the subdirectory portion, if any. +// +// This is primarily intended for generating addresses to send to the +// registry in question via the registry protocol, since the protocol +// skips sending the registry its own hostname as part of identifiers. +func (s ModuleRegistryPackage) ForRegistryProtocol() string { + var buf strings.Builder + buf.WriteString(s.Namespace) + buf.WriteByte('/') + buf.WriteString(s.Name) + buf.WriteByte('/') + buf.WriteString(s.TargetSystem) + return buf.String() +} diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go index be6ebca9c..f6a00199c 100644 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr { return s.session.LocalAddr() } -// LocalAddr returns the remote address +// RemoteAddr returns the remote address func (s *Stream) RemoteAddr() net.Addr { return s.session.RemoteAddr() } diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go index 4f5293828..2fdbf844a 100644 --- a/vendor/github.com/hashicorp/yamux/const.go +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -5,6 +5,25 @@ import ( "fmt" ) +// NetError implements net.Error +type NetError struct { + err error + timeout bool + temporary bool +} + +func (e *NetError) Error() string { + return e.err.Error() +} + +func (e *NetError) Timeout() bool { + return e.timeout +} + +func (e *NetError) Temporary() bool { + return e.temporary +} + var ( // ErrInvalidVersion means we received a frame with an // invalid version @@ -30,7 +49,13 @@ var ( ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") // ErrTimeout is used when we reach an IO deadline - ErrTimeout = fmt.Errorf("i/o deadline reached") + ErrTimeout = &NetError{ + err: fmt.Errorf("i/o deadline reached"), + + // Error should meet net.Error interface for timeouts for compatability + // with standard library expectations, such as http servers. 
+ timeout: true, + } // ErrStreamClosed is returned when using a closed stream ErrStreamClosed = fmt.Errorf("stream closed") diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod index 672a0e581..dd8974d3f 100644 --- a/vendor/github.com/hashicorp/yamux/go.mod +++ b/vendor/github.com/hashicorp/yamux/go.mod @@ -1 +1,3 @@ module github.com/hashicorp/yamux + +go 1.15 diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go index 18a078c8a..0c3e67b02 100644 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -31,6 +31,20 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 + // StreamOpenTimeout is the maximum amount of time that a stream will + // be allowed to remain in pending state while waiting for an ack from the peer. + // Once the timeout is reached the session will be gracefully closed. + // A zero value disables the StreamOpenTimeout allowing unbounded + // blocking on OpenStream calls. + StreamOpenTimeout time.Duration + + // StreamCloseTimeout is the maximum time that a stream will allowed to + // be in a half-closed state when `Close` is called before forcibly + // closing the connection. Forcibly closed connections will empty the + // receive buffer, drop any future packets received for that stream, + // and send a RST to the remote side. + StreamCloseTimeout time.Duration + // LogOutput is used to control the log destination. Either Logger or // LogOutput can be set, not both. LogOutput io.Writer @@ -48,6 +62,8 @@ func DefaultConfig() *Config { KeepAliveInterval: 30 * time.Second, ConnectionWriteTimeout: 10 * time.Second, MaxStreamWindowSize: initialStreamWindow, + StreamCloseTimeout: 5 * time.Minute, + StreamOpenTimeout: 75 * time.Second, LogOutput: os.Stderr, } } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index a80ddec35..046a3d301 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -80,7 +80,7 @@ type Session struct { // or to directly send a header type sendReady struct { Hdr []byte - Body io.Reader + Body []byte Err chan error } @@ -184,6 +184,10 @@ GET_ID: s.inflight[id] = struct{}{} s.streamLock.Unlock() + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + // Send the window update to create if err := stream.sendWindowUpdate(); err != nil { select { @@ -196,6 +200,27 @@ GET_ID: return stream, nil } +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + // Accept is used to block until the next available stream // is ready to be accepted. 
func (s *Session) Accept() (net.Conn, error) { @@ -327,7 +352,7 @@ func (s *Session) keepalive() { } // waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body io.Reader) error { +func (s *Session) waitForSend(hdr header, body []byte) error { errCh := make(chan error, 1) return s.waitForSendErr(hdr, body, errCh) } @@ -335,7 +360,7 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // waitForSendErr waits to send a header with optional data, checking for a // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. -func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { t := timerPool.Get() timer := t.(*time.Timer) timer.Reset(s.config.ConnectionWriteTimeout) @@ -415,7 +440,7 @@ func (s *Session) send() { // Send data from a body if given if ready.Body != nil { - _, err := io.Copy(s.conn, ready.Body) + _, err := s.conn.Write(ready.Body) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index aa2391973..f444bdc3c 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -49,6 +49,13 @@ type Stream struct { readDeadline atomic.Value // time.Time writeDeadline atomic.Value // time.Time + + // establishCh is notified if the stream is established or being closed. + establishCh chan struct{} + + // closeTimer is set with stateLock held to honor the StreamCloseTimeout + // setting on Session. + closeTimer *time.Timer } // newStream is used to construct a new stream within @@ -66,6 +73,7 @@ func newStream(session *Session, id uint32, state streamState) *Stream { sendWindow: initialStreamWindow, recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), } s.readDeadline.Store(time.Time{}) s.writeDeadline.Store(time.Time{}) @@ -161,7 +169,7 @@ func (s *Stream) Write(b []byte) (n int, err error) { func (s *Stream) write(b []byte) (n int, err error) { var flags uint16 var max uint32 - var body io.Reader + var body []byte START: s.stateLock.Lock() switch s.state { @@ -187,7 +195,7 @@ START: // Send up to our send window max = min(window, uint32(len(b))) - body = bytes.NewReader(b[:max]) + body = b[:max] // Send the header s.sendHdr.encode(typeData, flags, s.id, max) @@ -312,6 +320,27 @@ func (s *Stream) Close() error { s.stateLock.Unlock() return nil SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. + // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. 
+ if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + s.stateLock.Unlock() s.sendClose() s.notifyWaiting() @@ -321,6 +350,22 @@ SEND_CLOSE: return nil } +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. + s.sendLock.Lock() + defer s.sendLock.Unlock() + s.sendHdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(s.sendHdr) +} + // forceClose is used for when the session is exiting func (s *Stream) forceClose() { s.stateLock.Lock() @@ -332,20 +377,27 @@ func (s *Stream) forceClose() { // processFlags is used to update the state of the stream // based on set flags, if any. Lock must be held func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + // Close the stream without holding the state lock closeStream := false defer func() { if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + s.session.closeStream(s.id) } }() - s.stateLock.Lock() - defer s.stateLock.Unlock() if flags&flagACK == flagACK { if s.state == streamSYNSent { s.state = streamEstablished } + asyncNotify(s.establishCh) s.session.establishStream(s.id) } if flags&flagFIN == flagFIN { @@ -378,6 +430,7 @@ func (s *Stream) processFlags(flags uint16) error { func (s *Stream) notifyWaiting() { asyncNotify(s.recvNotifyCh) asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) } // incrSendWindow updates the size of our send window @@ -446,15 +499,17 @@ func (s *Stream) SetDeadline(t time.Time) error { return nil } -// SetReadDeadline sets the deadline for future Read calls. +// SetReadDeadline sets the deadline for blocked and future Read calls. 
func (s *Stream) SetReadDeadline(t time.Time) error { s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) return nil } -// SetWriteDeadline sets the deadline for future Write calls +// SetWriteDeadline sets the deadline for blocked and future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) return nil } diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go index ddc7e0091..ebfac32dd 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go @@ -88,9 +88,9 @@ type EventOrchestrationPathActionVariables struct { type EventOrchestrationPathActionExtractions struct { Target string `json:"target,omitempty"` - Regex string `json:"regex,omitempty"` + Regex string `json:"regex,omitempty"` Template string `json:"template,omitempty"` - Source string `json:"source,omitempty"` + Source string `json:"source,omitempty"` } type EventOrchestrationPathCatchAll struct { diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go index 8716eb0f2..1cbe818a8 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go @@ -19,6 +19,9 @@ type UserReference resourceReference // EscalationPolicyReference represents a reference to an escalation policy. type EscalationPolicyReference resourceReference +// ResponsePlayReference represents a reference to a response play. +type ResponsePlayReference resourceReference + // ScheduleReference represents a reference to a schedule. type ScheduleReference resourceReference diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go index b51f12036..ffb128b46 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go @@ -49,6 +49,12 @@ type AlertGroupingParameters struct { Config *AlertGroupingConfig `json:"config,omitempty"` } +// AutoPauseNotificationsParameters defines how alerts on this service are automatically suspended for a period of time before triggering, when identified as likely being transient. +type AutoPauseNotificationsParameters struct { + Enabled bool `json:enabled,omitempty` + Timeout int `json:timeout,omitempty` +} + // IncidentUrgencyRule is the default urgency for new incidents. type IncidentUrgencyRule struct { DuringSupportHours *IncidentUrgencyType `json:"during_support_hours,omitempty"` @@ -123,29 +129,31 @@ type ValueExtractor struct { // Service represents a service. 
type Service struct { - AcknowledgementTimeout *int `json:"acknowledgement_timeout"` - Addons []*AddonReference `json:"addons,omitempty"` - AlertCreation string `json:"alert_creation,omitempty"` - AlertGrouping *string `json:"alert_grouping"` - AlertGroupingTimeout *int `json:"alert_grouping_timeout,omitempty"` - AlertGroupingParameters *AlertGroupingParameters `json:"alert_grouping_parameters,omitempty"` - AutoResolveTimeout *int `json:"auto_resolve_timeout"` - CreatedAt string `json:"created_at,omitempty"` - Description string `json:"description,omitempty"` - EscalationPolicy *EscalationPolicyReference `json:"escalation_policy,omitempty"` - HTMLURL string `json:"html_url,omitempty"` - ID string `json:"id,omitempty"` - IncidentUrgencyRule *IncidentUrgencyRule `json:"incident_urgency_rule,omitempty"` - Integrations []*IntegrationReference `json:"integrations,omitempty"` - LastIncidentTimestamp string `json:"last_incident_timestamp,omitempty"` - Name string `json:"name,omitempty"` - ScheduledActions []*ScheduledAction `json:"scheduled_actions,omitempty"` - Self string `json:"self,omitempty"` - Status string `json:"status,omitempty"` - Summary string `json:"summary,omitempty"` - SupportHours *SupportHours `json:"support_hours,omitempty"` - Teams []*TeamReference `json:"teams,omitempty"` - Type string `json:"type,omitempty"` + AcknowledgementTimeout *int `json:"acknowledgement_timeout"` + Addons []*AddonReference `json:"addons,omitempty"` + AlertCreation string `json:"alert_creation,omitempty"` + AlertGrouping *string `json:"alert_grouping"` + AlertGroupingTimeout *int `json:"alert_grouping_timeout,omitempty"` + AlertGroupingParameters *AlertGroupingParameters `json:"alert_grouping_parameters,omitempty"` + AutoPauseNotificationsParameters *AutoPauseNotificationsParameters `json:"auto_pause_notifications_parameters,omitempty"` + AutoResolveTimeout *int `json:"auto_resolve_timeout"` + CreatedAt string `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + EscalationPolicy *EscalationPolicyReference `json:"escalation_policy,omitempty"` + ResponsePlay *ResponsePlayReference `json:"response_play,omitempty"` + HTMLURL string `json:"html_url,omitempty"` + ID string `json:"id,omitempty"` + IncidentUrgencyRule *IncidentUrgencyRule `json:"incident_urgency_rule,omitempty"` + Integrations []*IntegrationReference `json:"integrations,omitempty"` + LastIncidentTimestamp string `json:"last_incident_timestamp,omitempty"` + Name string `json:"name,omitempty"` + ScheduledActions []*ScheduledAction `json:"scheduled_actions,omitempty"` + Self string `json:"self,omitempty"` + Status string `json:"status,omitempty"` + Summary string `json:"summary,omitempty"` + SupportHours *SupportHours `json:"support_hours,omitempty"` + Teams []*TeamReference `json:"teams,omitempty"` + Type string `json:"type,omitempty"` } // ServicePayload represents a service. 
diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go index 9e032e81b..bbbb3798f 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go @@ -333,7 +333,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM resp, err := s.client.newRequestDo("POST", u, nil, &ContactMethodPayload{ContactMethod: contactMethod}, &v) if err != nil { - if e, ok := err.(*Error); !ok || strings.Compare(fmt.Sprintf("%v", e.Errors), "[User Contact method must be unique]") != 0 { return nil, nil, err } @@ -344,7 +343,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM } v.ContactMethod = sContact resp = sResp - } if err = cachePutContactMethod(v.ContactMethod); err != nil { @@ -356,7 +354,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM return v.ContactMethod, resp, nil } - func (s *UserService) findExistingContactMethod(userID string, contactMethod *ContactMethod) (*ContactMethod, *Response, error) { lResp, _, lErr := s.ListContactMethods(userID) if lErr != nil { diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go index 19f06ddbd..64a140ec9 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go @@ -22,6 +22,7 @@ type DeliveryMethod struct { Type string `json:"type,omitempty"` URL string `json:"url,omitempty"` CustomHeaders []*CustomHeaders `json:"custom_headers"` + Secret string `json:"secret,omitempty"` } type CustomHeaders struct { diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..d31b37815 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..0af08e65e --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@latest + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 1eb75ef68..87d557477 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -26,3 +26,279 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 000000000..5c3c2a258 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,529 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. +* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +
+ See Details +Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster when running asynchronously, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to test when upgrading.
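As a minimal sketch of the synchronous mode described above, assuming the `zstd.WithEncoderConcurrency` option in `github.com/klauspost/compress/zstd` (with concurrency set to 1, encoding runs without spawning goroutines):

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1: the encoder operates synchronously, no goroutines are spawned.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		panic(err)
	}
	// Close flushes any buffered data and writes the stream footer.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```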
+ +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) + +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +``` + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. 
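For direct stateless deflate, a minimal sketch assuming `flate.NewStatelessWriter(dst io.Writer) io.WriteCloser` behaves as described above (no state kept between Write calls, so write sizes directly affect output size):

```go
package main

import (
	"bufio"
	"os"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Stateless writer: nothing is retained between Write calls.
	fw := flate.NewStatelessWriter(os.Stdout)
	defer fw.Close()

	// Buffer writes (here 4KB) so each flush hands the compressor
	// a reasonably sized block.
	w := bufio.NewWriterSize(fw, 4096)
	defer w.Flush()

	w.WriteString("hello, stateless deflate\n")
}
```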
+ +# Performance Update 2018 + +It has been a while since we have looked at the speed of this package compared to the standard library, so I thought I would redo my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. + +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" means "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 show no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. It is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS. + +Since levels 1 and 9 are close to being the same code, the results are quite close. But looking at the levels in between, the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. + +The picture is similar to the web content, but with small differences since this data is very compressible.
Levels 2-3 offer good speed, but sacrifice quite a bit of compression. + +The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and lower speed than levels 6 and 7 of this package, respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. + +So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (a 2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump from Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that they might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data and only reducing the number of bits used to represent each character. + +This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). + +The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1).
So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 000000000..ea5a692d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. 
+// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index b69237c9b..6f341914c 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -92,7 +92,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTra im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + first.deltaFindState c.state = c.stateTable[lu] - return } // encode the output symbol provided and write it to the bitstream. @@ -301,7 +300,7 @@ func (s *Scratch) writeCount() error { out[outP+1] = byte(bitStream >> 8) outP += (bitCount + 7) / 8 - if uint16(charnum) > s.symbolLen { + if charnum > s.symbolLen { return errors.New("internal error: charnum > s.symbolLen") } s.Out = out[:outP] @@ -331,7 +330,7 @@ type cTable struct { func (s *Scratch) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < int(tableSize) { + if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] @@ -565,8 +564,8 @@ func (s *Scratch) normalizeCount2() error { distributed uint32 total = uint32(s.br.remain()) tableLog = s.actualTableLog - lowThreshold = uint32(total >> tableLog) - lowOne = uint32((total * 3) >> (tableLog + 1)) + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { @@ -591,7 +590,7 @@ func (s *Scratch) normalizeCount2() error { if (total / toDistribute) > lowOne { // risk of rounding to zero - lowOne = uint32((total * 3) / (toDistribute * 2)) + lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 413ec3b3c..926f5f153 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -172,7 +172,7 @@ type decSymbol struct { // allocDtable will allocate decoding tables if they are not big enough. func (s *Scratch) allocDtable() { tableSize := 1 << s.actualTableLog - if cap(s.decTable) < int(tableSize) { + if cap(s.decTable) < tableSize { s.decTable = make([]decSymbol, tableSize) } s.decTable = s.decTable[:tableSize] @@ -340,7 +340,7 @@ type decoder struct { func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { d.dt = dt d.br = in - d.state = uint16(in.getBits(tableLog)) + d.state = in.getBits(tableLog) } // next returns the next symbol and sets the next state. diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh new file mode 100644 index 000000000..aff942205 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gen.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd s2/cmd/_s2sx/ || exit 1 +go generate . 
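For orientation (not part of the patch itself), a minimal sketch of how the two helpers added in `compressible.go` above might be called; it relies only on the `Estimate` and `ShannonEntropyBits` signatures shown in the diff, and the sample input is made up:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

func main() {
	// Highly redundant sample input; real callers would pass a block of their own data.
	data := bytes.Repeat([]byte("abcabcabc"), 1000)

	// Estimate returns a normalized compressibility score: values near zero are
	// likely uncompressible, above 0.1 likely compressible, above 0.5 very compressible.
	fmt.Printf("estimate: %.3f\n", compress.Estimate(data))

	// ShannonEntropyBits returns the minimum number of bits an entropy encoding
	// of the input would need, a cheap lower bound to compare against len(data)*8.
	fmt.Printf("entropy: %d bits (raw: %d bits)\n",
		compress.ShannonEntropyBits(data), len(data)*8)
}
```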
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod new file mode 100644 index 000000000..b605e2d52 --- /dev/null +++ b/vendor/github.com/klauspost/compress/go.mod @@ -0,0 +1,3 @@ +module github.com/klauspost/compress + +go 1.16 diff --git a/vendor/github.com/klauspost/compress/go.sum b/vendor/github.com/klauspost/compress/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index e12da4db2..8b6e5c663 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no ## News - * Mar 2018: First implementation released. Consider this beta software for now. +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. # Usage diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e886..504a7be9d 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d..ec71f7a34 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. 
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea..4dcab8d23 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index f9ed5f830..4d14542fa 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -161,6 +162,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) return s.Out, false, nil } +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + func (s *Scratch) compress1X(src []byte) ([]byte, error) { return s.compress1xDo(s.Out, src) } @@ -225,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -268,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -331,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false @@ -403,7 +477,7 @@ func (s *Scratch) buildCTable() error { var startNode = int16(s.symbolLen) nonNullRank := s.symbolLen - 1 - nodeNb := int16(startNode) + nodeNb := startNode huffNode := s.nodes[1 : huffNodesLen+1] // This overlays the slice above, but allows "-1" index lookups. 
@@ -536,7 +610,6 @@ func (s *Scratch) huffSort() { } nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} } - return } func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { @@ -580,7 +653,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // Get pos of last (smallest) symbol per rank { - currentNbBits := uint8(maxNbBits) + currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { if huffNode[pos].nbBits >= currentNbBits { continue diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 41703bba4..c0c48bd70 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,13 +4,13 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -18,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq uint16 - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -34,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() + return &[4][256]byte{} } // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. @@ -341,41 +247,258 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - shift := (8 - d.actualTableLog) & 7 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) } - dst = append(dst, buf[:]...) } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -383,6 +506,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { // br < 4, so uint8 is fine bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + for bitsLeft > 0 { if br.bitsRead >= 64-8 { for br.off > 0 { @@ -393,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -401,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -420,33 +547,35 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - const shift = 0 + const shift = 56 //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) for br.off >= 4 { br.fillFast() - v := dt[br.peekByteFast()>>shift] + v := dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+0] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+1] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+2] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+3] = uint8(v.entry >> 8) off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -471,208 +601,20 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } - v := dt[br.peekByteFast()>>shift] + v := dt[br.peekByteFast()] nBits := uint8(v.entry) br.advance(nBits) bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -706,19 +648,18 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - shift := (8 - d.actualTableLog) & 7 + shift := (56 + (8 - d.actualTableLog)) & 63 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -728,96 +669,109 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry 
+ v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -825,23 +779,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -861,24 +823,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. 
- v := single[br.peekByteFast()>>shift].entry + v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -914,18 +883,17 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - const shift = 0 + const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -935,96 +903,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { 
const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. 
if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1034,21 +1015,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1068,24 +1055,32 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry + v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000..671e630a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,228 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr0 *bitReaderShifted + pbr1 *bitReaderShifted + pbr2 *bitReaderShifted + pbr3 *bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr0: &br[0], + pbr1: &br[1], + pbr2: &br[2], + pbr3: &br[3], + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
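
The doc comments above describe an unusual calling contract: for `Decompress4X` the *capacity* of `dst` must equal the exact decompressed size, while for `Decompress1X` the capacity only caps the output. A minimal round-trip sketch of that contract follows; it assumes the package's public `Compress4X`, `ReadTable`, and `Scratch.Decoder` helpers (not shown in this hunk) behave as in the upstream huff0 API, so treat it as an illustration rather than code from this change.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("huff0 dst-capacity contract "), 64)

	// Compress4X emits the Huffman table followed by four entropy-coded streams.
	var s huff0.Scratch
	comp, _, err := huff0.Compress4X(in, &s)
	if err != nil {
		log.Fatal(err) // e.g. huff0.ErrIncompressible for unsuitable input
	}

	// ReadTable parses the table at the start of the block and returns the
	// remaining payload, which must end exactly at the block boundary.
	dec, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Decompress4X requires cap(dst) to equal the exact decompressed size,
	// which the caller must know from elsewhere (zstd records it per block).
	out, err := dec.Decoder().Decompress4X(make([]byte, 0, len(in)), remain)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(in, out)) // expected: true
}
```

Fixing `cap(dst)` to the decompressed size is what lets the assembly main loop bound itself with the `limit` pointer ("Always stop decoding when first buffer gets here") instead of per-byte bounds checks; the scalar tail loop then verifies `offset == endsAt` to flag truncated or corrupt streams.
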
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000..6c65c6e2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,865 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $8-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 32(AX), SI + MOVQ 40(AX), DI + MOVQ DI, BX + MOVQ 72(AX), CX + MOVQ CX, (SP) + MOVQ 48(AX), R8 + MOVQ 56(AX), R9 + MOVQ (AX), R10 + MOVQ 8(AX), R11 + MOVQ 16(AX), R12 + MOVQ 24(AX), R13 + + // Main loop +main_loop: + MOVQ BX, DI + CMPQ DI, (SP) + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R14 + MOVBQZX 40(R10), R15 + CMPQ R15, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R10), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R10) + ORQ BP, R14 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br0.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R10) + MOVB R15, 40(R10) + ADDQ R8, DI + + // br1.fillFast32() + MOVQ 32(R11), R14 + MOVBQZX 40(R11), R15 + CMPQ R15, $0x20 + JBE skip_fill1 + MOVQ 24(R11), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R11), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R11) + ORQ BP, R14 + + // 
exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br1.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R11) + MOVB R15, 40(R11) + ADDQ R8, DI + + // br2.fillFast32() + MOVQ 32(R12), R14 + MOVBQZX 40(R12), R15 + CMPQ R15, $0x20 + JBE skip_fill2 + MOVQ 24(R12), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R12), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R12) + ORQ BP, R14 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br2.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R12) + MOVB R15, 40(R12) + ADDQ R8, DI + + // br3.fillFast32() + MOVQ 32(R13), R14 + MOVBQZX 40(R13), R15 + CMPQ R15, $0x20 + JBE skip_fill3 + MOVQ 24(R13), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R13), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R13) + ORQ BP, R14 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br3.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R13) + MOVB R15, 40(R13) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + MOVQ 40(AX), CX + MOVQ BX, DX + SUBQ CX, DX + SHLQ $0x02, DX + MOVQ DX, 64(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 32(CX), BX + MOVQ 40(CX), SI + MOVQ SI, (SP) + MOVQ 72(CX), DX + MOVQ DX, 8(SP) + MOVQ 48(CX), DI + MOVQ 56(CX), R8 + MOVQ (CX), R9 + MOVQ 8(CX), R10 + MOVQ 16(CX), R11 + MOVQ 24(CX), R12 + + // Main loop +main_loop: + MOVQ (SP), SI + CMPQ SI, 8(SP) + SETGE DL + 
+ // br1000.fillFast32() + MOVQ 32(R9), R13 + MOVBQZX 40(R9), R14 + CMPQ R14, $0x20 + JBE skip_fill1000 + MOVQ 24(R9), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R9), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R9) + ORQ BP, R13 + + // exhausted = exhausted || (br1000.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1000: + // val0 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R9) + MOVB R14, 40(R9) + ADDQ DI, SI + + // br1001.fillFast32() + MOVQ 32(R10), R13 + MOVBQZX 40(R10), R14 + CMPQ R14, $0x20 + JBE skip_fill1001 + MOVQ 24(R10), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R10), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R10) + ORQ BP, R13 + + // exhausted = exhausted || (br1001.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1001: + // val0 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R10) + MOVB R14, 40(R10) + ADDQ DI, SI + + // br1002.fillFast32() + MOVQ 32(R11), R13 + MOVBQZX 40(R11), R14 + CMPQ R14, $0x20 + JBE skip_fill1002 + MOVQ 24(R11), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R11), BP + + // b.value |= 
uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R11) + ORQ BP, R13 + + // exhausted = exhausted || (br1002.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1002: + // val0 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R11) + MOVB R14, 40(R11) + ADDQ DI, SI + + // br1003.fillFast32() + MOVQ 32(R12), R13 + MOVBQZX 40(R12), R14 + CMPQ R14, $0x20 + JBE skip_fill1003 + MOVQ 24(R12), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R12), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R12) + ORQ BP, R13 + + // exhausted = exhausted || (br1003.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1003: + // val0 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R12) + MOVB R14, 40(R12) + ADDQ $0x04, (SP) + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + MOVQ 40(AX), CX + MOVQ (SP), DX + SUBQ CX, DX + SHLQ $0x02, DX + MOVQ DX, 64(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ 
(CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + MOVQ DX, CX + MOVQ 16(AX), DX + SUBQ DX, CX + MOVQ CX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + MOVQ DX, CX + MOVQ 16(AX), DX + SUBQ DX, CX + MOVQ CX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go 
b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..4f6f37cb2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,295 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. 
+// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 7ec2022b6..e8ad17ad0 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } @@ -245,6 +247,68 @@ func (c cTable) write(s *Scratch) error { return nil } +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. 
+ hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + // estimateSize returns the estimated size in bytes of the input represented in the // histogram supplied. func (c cTable) estimateSize(hist []uint32) int { diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. 
EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/LICENSE rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go similarity index 85% rename from vendor/github.com/klauspost/compress/snappy/decode.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode.go index 72efb0353..40796a49d 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error @@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). 
if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/decode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode_other.go index 94a96c5d7..77395a6b8 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !amd64 appengine !gc noasm - -package snappy +package snapref // decode writes the decoding of src to dst. It assumes that the varint-encoded // length of the decompressed bytes has already been read, and that len(dst) @@ -87,7 +85,7 @@ func decode(dst, src []byte) int { } // Copy from an earlier sub-slice of dst to a later sub-slice. // If no overlap, use the built-in copy: - if offset > length { + if offset >= length { copy(dst[d:d+length], dst[d-offset:]) d += length continue diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/encode.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode.go index 8d393e904..13c6040a5 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. type Writer struct { w io.Writer err error diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go similarity index 99% rename from vendor/github.com/klauspost/compress/snappy/encode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index dbcae905e..511bba65d 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm - -package snappy +package snapref func load32(b []byte, i int) uint32 { b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/snappy.go rename to vendor/github.com/klauspost/compress/internal/snapref/snappy.go index 74a36689e..34d01f4aa 100644 --- a/vendor/github.com/klauspost/compress/snappy/snappy.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the Snappy compression format. It aims for very +// Package snapref implements the Snappy compression format. It aims for very // high speeds and reasonable compression. // // There are actually two Snappy formats: block and stream. They are related, @@ -17,7 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. 
-package snappy +package snapref import ( "hash/crc32" diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..2263853fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/klauspost/compress/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/klauspost/compress/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. 
- -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/klauspost/compress/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go 
b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. 
- MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. 
- SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
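The deleted assembly notes that it generally follows the pure Go code in encode_other.go. For orientation, a minimal Go sketch of the Snappy tag encoding that emitLiteral and emitCopy implement is given below; it is an illustration, not the vendored code, the constant names (tagLiteral, tagCopy1, tagCopy2) are chosen for readability, and it only covers the length ranges the assembly above handles.

```Go
package snappysketch

// Tag values of the Snappy block format, mirroring the comments in the
// assembly above (low two bits of the first byte of each element).
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
)

// emitLiteral writes a literal element: a tag byte, up to two extra length
// bytes for long literals, then the literal bytes themselves.
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral // the 0xf0 case in the assembly
		dst[1] = uint8(n)
		i = 2
	default:
		dst[0] = 61<<2 | tagLiteral // the 0xf4 case in the assembly
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}

// emitCopy writes copy elements: 3-byte copies for long lengths or large
// offsets, and a 2-byte copy when length < 12 and offset < 2048.
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	for length >= 68 {
		dst[i+0] = 63<<2 | tagCopy2 // 0xfe: a length-64 copy, 3 bytes
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	if length > 64 {
		dst[i+0] = 59<<2 | tagCopy2 // 0xee: a length-60 copy, 3 bytes
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}
```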
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd deleted file mode 100644 index d24eb4b47..000000000 --- a/vendor/github.com/klauspost/compress/snappy/runbench.cmd +++ /dev/null @@ -1,2 +0,0 @@ -del old.txt -go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 07f7285f0..beb7fa872 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -16,30 +16,28 @@ Currently the package is heavily optimized for 64 bit processors and will be sig Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. -Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd - +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) ## Compressor ### Status: STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. There may still be specific combinations of data types/size/settings that could lead to edge cases, so as always, testing is recommended. For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. -The "Fastest" compression ratio is roughly equivalent to zstd level 1. -The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. The compression ratio compared to stdlib is around level 3, but usually 3x as fast. -Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2. - ### Usage @@ -54,11 +52,11 @@ To create a writer with default options, do like this: ```Go // Compress input to output. func Compress(in io.Reader, out io.Writer) error { - w, err := NewWriter(output) + enc, err := zstd.NewWriter(out) if err != nil { return err } - _, err := io.Copy(w, input) + _, err = io.Copy(enc, in) if err != nil { enc.Close() return err @@ -80,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. @@ -106,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. Encoded blocks can be concatenated and the result will be the combined input stream. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. @@ -140,7 +142,7 @@ I have collected some speed examples to compare speed and compression against ot * `file` is the input file. * `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default". +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". * `insize`/`outsize` is the input/output size. * `millis` is the number of milliseconds used for compression. * `mb/s` is megabytes (2^20 bytes) per second. 
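As a hedged sketch of the `EncodeAll` buffer path described in the README text above: the encoder is created once (a `nil` writer is acceptable when only buffer compression is used) and then reused, possibly concurrently. The level name `SpeedDefault` is assumed from the package's documented options rather than quoted from this patch.

```Go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A single encoder can be reused for many EncodeAll calls, also concurrently.
// A nil writer is fine here because only buffer compression is used.
var encoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedDefault))

// compress encodes src in one shot and appends the result to a fresh buffer.
func compress(src []byte) []byte {
	return encoder.EncodeAll(src, nil)
}

func main() {
	frame := compress([]byte("hello hello hello world"))
	fmt.Println(len(frame), "bytes in the compressed frame")
}
```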
@@ -151,84 +153,106 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 65177448 1899 106.44 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 silesia.tar zstd 3 211947520 66793289 864 233.68 silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80369488 1168 173.06 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 362156523 5695 320.08 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. 
https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## Decompressor @@ -263,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error { } ``` -It is important to use the "Close" function when you no longer need the Reader to stop running 
goroutines. -See "Allocation-less operation" below. +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. For decoding buffers, it could look something like this: @@ -273,7 +302,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) +var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. @@ -283,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) { ``` Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. ### Dictionaries @@ -337,70 +369,71 @@ In this case no unneeded allocations should be made. The buffer decoder does everything on the same goroutine and does nothing concurrently. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. -The stream decoder operates on +The stream decoder will create goroutines that: -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. 
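A minimal sketch of the synchronous stream decompression mentioned in the Decompressor section above, assuming the documented `WithDecoderConcurrency(1)` option; error handling is kept short and the helper name is illustrative.

```Go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// decompressSync streams a zstd frame from in to out without asynchronous
// read-ahead, using the WithDecoderConcurrency(1) behaviour described above.
func decompressSync(in io.Reader, out io.Writer) error {
	d, err := zstd.NewReader(in, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return err
	}
	// Close still releases the decoder's resources when done.
	defer d.Close()
	_, err = io.Copy(out, d)
	return err
}

func main() {
	if err := decompressSync(os.Stdin, os.Stdout); err != nil {
		panic(err)
	}
}
```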
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. + +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. # Contributions Contributions are always welcome. For new features/fixes, remember to add tests and for performance enhancements include benchmarks. -For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. - For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 854458537..97299d499 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -50,16 +51,16 @@ func (b *bitReader) getBits(n uint8) int { if n == 0 /*|| b.bitsRead >= 64 */ { return 0 } - return b.getBitsFast(n) + return int(b.get32BitsFast(n)) } -// getBitsFast requires that at least one bit is requested every time. +// get32BitsFast requires that at least one bit is requested every time. // There are no checks if the buffer is filled. 
-func (b *bitReader) getBitsFast(n uint8) int { +func (b *bitReader) get32BitsFast(n uint8) uint32 { const regMask = 64 - 1 v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) b.bitsRead += n - return int(v) + return v } // fillFast() will make sure at least 32 bits are available. @@ -125,6 +126,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 303ae90f9..78b3c61be 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -38,7 +36,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) { b.nBits += bits } -// addBits32NC will add up to 32 bits. +// addBits32NC will add up to 31 bits. // It will not check if there is space for them, // so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits32NC(value uint32, bits uint8) { @@ -46,6 +44,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) { b.nBits += bits } +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -53,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 4733ea876..7eed729be 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +43,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -76,20 +81,27 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 - tmp [4]byte Type blockType @@ -109,13 +121,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -123,44 +130,60 @@ func newBlockDec(lowMem bool) *blockDec { // Input must be a start of a block and will be at the end of the block when returned. func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.WindowSize = windowSize - tmp := br.readSmall(3) - if tmp == nil { - if debug { - println("Reading block header:", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err } bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) b.Last = bh&1 != 0 b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize } cSize = 1 case blockTypeCompressed: - if debug { + if debugDecoder { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debug { + if debugDecoder { printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -170,19 +193,18 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { - if b.lowMem { - b.dataStorage = make([]byte, 0, cSize) + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { b.dst = make([]byte, 0, maxSize+1) } - var err error b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { - if debug { + if debugDecoder { println("Reading block:", err, "(", cSize, ")", len(b.data)) printf("%T", br) } @@ -196,85 +218,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() } -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. 
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debug { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debug { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -297,14 +248,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) - if debug { + if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -314,30 +274,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -353,7 +301,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -364,7 +312,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -375,7 +323,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -385,7 +333,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -393,16 +341,18 @@ func (b *blockDec) decodeCompressed(hist *history) error { in = in[5:] } } - if debug { + if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -410,19 +360,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -431,45 +375,79 @@ func (b *blockDec) decodeCompressed(hist *history) error { literals[i] = v } in = in[1:] - if debug { + if debugDecoder { printf("Found %d RLE compressed literals\n", litRegenSize) } case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] in = in[litCompSize:] - if debug { + if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. 
+ if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. + huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -478,27 +456,63 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } - if debug { + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -515,19 +529,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] + return ErrUnexpectedBlockSize } - var seqs = &sequenceDecs{} + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -535,12 +546,12 @@ func (b *blockDec) decodeCompressed(hist *history) error { br := byteReader{b: in, off: 0} compMode := br.Uint8() br.advance(1) - if debug { + if debugDecoder { printf("Compression modes: 0b%b", compMode) } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debug { + if debugDecoder { println("Table", tableIndex(i), "is", mode) } var seq *sequenceDec @@ -556,6 +567,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -563,34 +577,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec - if debug { + seq.fse.setRLE(symb) + if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } - if debug { - println("Read table ok", "symbolLen:", dec.symbolLen) + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: 
seq.repeat = true } @@ -600,140 +616,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literials before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } + return nil } - if huff != nil { - hist.huffTree = huff + br := seqs.br + if br == nil { + br = &bitReader{} } - if debug { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + if err := br.init(in); err != nil { + return err } - if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... 
+ if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } } - return nil + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debug { - println("History merged ok") + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. 
Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debug { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 083fbb502..12e8f6f0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -22,28 +22,44 @@ type blockEnc struct { dictLitEnc *huff0.Scratch wr bitWriter - extraLits int - last bool - + extraLits int output []byte recentOffsets [3]uint32 prevRecentOffsets [3]uint32 + + last bool + lowMem bool } // init should be used once the block has been created. // If called more than once, the effect is the same as calling reset. func (b *blockEnc) init() { - if cap(b.literals) < maxCompressedLiteralSize { - b.literals = make([]byte, 0, maxCompressedLiteralSize) - } - const defSeqs = 200 - b.literals = b.literals[:0] - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } } + if b.coders.mlEnc == nil { b.coders.mlEnc = &fseEncoder{} b.coders.mlPrev = &fseEncoder{} @@ -76,6 +92,7 @@ func (b *blockEnc) reset(prev *blockEnc) { if prev != nil { b.recentOffsets = prev.prevRecentOffsets } + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, @@ -139,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) { switch { case inBits < 5: lh |= (uint64(regenLen) << 3) | (1 << 60) - if debug { + if debugEncoder { got := int(lh>>3) & 0xff if got != regenLen { panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) @@ -167,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { lh |= 1 << 2 } lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debug { + if debugEncoder { const mmask = (1 << 24) - 1 n := (lh >> 4) & mmask if int(n&1023) != inLen { @@ -295,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) { bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output[:0]) b.output = append(b.output, a...) - if debug { + if debugEncoder { println("Adding RAW block, length", len(a), "last:", b.last) } } @@ -308,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { bh.setType(blockTypeRaw) dst = bh.appendTo(dst) dst = append(dst, src...) 
- if debug { + if debugEncoder { println("Adding RAW block, length", len(src), "last:", b.last) } return dst @@ -322,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { // Don't compress extremely small blocks if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -354,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { switch err { case huff0.ErrIncompressible: - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -362,16 +379,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { b.output = append(b.output, lits...) return nil case huff0.ErrUseRLE: - if debug { + if debugEncoder { println("Adding RLE block, length", len(lits)) } bh.setType(blockTypeRLE) b.output = bh.appendTo(b.output) b.output = append(b.output, lits[0]) return nil + case nil: default: return err - case nil: } // Compressed... // Now, allow reuse @@ -379,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { bh.setType(blockTypeCompressed) var lh literalsHeader if reUsed { - if debug { + if debugEncoder { println("Reused tree, compressed to", len(out)) } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) @@ -409,7 +426,7 @@ func fuzzFseEncoder(data []byte) int { return 0 } enc := fseEncoder{} - hist := enc.Histogram()[:256] + hist := enc.Histogram() maxSym := uint8(0) for i, v := range data { v = v & 63 @@ -500,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals...) - if debug { + if debugEncoder { println("Adding literals RAW, length", len(b.literals)) } case huff0.ErrUseRLE: @@ -508,27 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals[0]) - if debug { + if debugEncoder { println("Adding literals RLE") } - default: - if debug { - println("Adding literals ERROR:", err) - } - return err case nil: // Compressed litLen... if reUsed { - if debug { + if debugEncoder { println("reused tree") } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("new tree, size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) - if debug { + if debugEncoder { _, _, err := huff0.ReadTable(out, nil) if err != nil { panic(err) @@ -536,16 +548,21 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } } lh.setSizes(len(out), len(b.literals), single) - if debug { + if debugEncoder { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) } b.output = lh.appendTo(b.output) b.output = append(b.output, out...) 
b.litEnc.Reuse = huff0.ReusePolicyAllow - if debug { + if debugEncoder { println("Adding literals compressed") } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err } // Sequence compression @@ -560,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { n := len(b.sequences) - 0x7f00 b.output = append(b.output, 255, uint8(n), uint8(n>>8)) } - if debug { + if debugEncoder { println("Encoding", len(b.sequences), "sequences") } b.genCodes() @@ -594,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { nSize = nSize + (nSize+2*8*16)>>4 switch { case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debug { + if debugEncoder { println("Using predefined", predefSize>>3, "<=", nSize>>3) } return preDef, compModePredefined case prevSize <= nSize: - if debug { + if debugEncoder { println("Using previous", prevSize>>3, "<=", nSize>>3) } return prev, compModeRepeat default: - if debug { + if debugEncoder { println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) } @@ -617,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if llEnc.useRLE { mode |= uint8(compModeRLE) << 6 llEnc.setRLE(b.sequences[0].llCode) - if debug { + if debugEncoder { println("llEnc.useRLE") } } else { @@ -628,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if ofEnc.useRLE { mode |= uint8(compModeRLE) << 4 ofEnc.setRLE(b.sequences[0].ofCode) - if debug { + if debugEncoder { println("ofEnc.useRLE") } } else { @@ -640,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if mlEnc.useRLE { mode |= uint8(compModeRLE) << 2 mlEnc.setRLE(b.sequences[0].mlCode) - if debug { + if debugEncoder { println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) } } else { @@ -649,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { mode |= uint8(m) << 2 } b.output = append(b.output, mode) - if debug { + if debugEncoder { printf("Compression modes: 0b%b", mode) } b.output, err = llEnc.writeCount(b.output) @@ -705,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) } seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. 
+ //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. + outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is below 8 for each. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // ml+ll = max 32 bits total - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - seq-- - } + seq-- } ml.flush(mlEnc.actualTableLog) of.flush(ofEnc.actualTableLog) @@ -769,7 +787,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { // Size is output minus block header. bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debug { + if debugEncoder { println("Rewriting block header", bh) } _ = bh.appendTo(b.output[bhOffset:bhOffset]) @@ -784,14 +802,13 @@ func (b *blockEnc) genCodes() { // nothing to do return } - if len(b.sequences) > math.MaxUint16 { panic("can only encode up to 64K sequences") } // No bounds checks after here: - llH := b.coders.llEnc.Histogram()[:256] - ofH := b.coders.ofEnc.Histogram()[:256] - mlH := b.coders.mlEnc.Histogram()[:256] + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() for i := range llH { llH[i] = 0 } @@ -803,7 +820,8 @@ func (b *blockEnc) genCodes() { } var llMax, ofMax, mlMax uint8 - for i, seq := range b.sequences { + for i := range b.sequences { + seq := &b.sequences[i] v := llCode(seq.litLen) seq.llCode = v llH[v]++ @@ -827,7 +845,6 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) } } - b.sequences[i] = seq } maxCount := func(a []uint32) int { var max uint32 diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 658ef7838..4493baa75 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -12,8 +12,8 @@ import ( type byteBuffer interface { // Read up to 8 bytes. - // Returns nil if no more input is available. - readSmall(n int) []byte + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) // Read >8 bytes. // MAY use the destination slice. @@ -29,17 +29,17 @@ type byteBuffer interface { // in-memory buffer type byteBuf []byte -func (b *byteBuf) readSmall(n int) []byte { +func (b *byteBuf) readSmall(n int) ([]byte, error) { if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). 
use readBig", n)) } bb := *b if len(bb) < n { - return nil + return nil, io.ErrUnexpectedEOF } r := bb[:n] *b = bb[n:] - return r + return r, nil } func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { @@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -81,19 +77,22 @@ type readerWrapper struct { tmp [8]byte } -func (r *readerWrapper) readSmall(n int) []byte { +func (r *readerWrapper) readSmall(n int) ([]byte, error) { if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) } n2, err := io.ReadFull(r.r, r.tmp[:n]) // We only really care about the actual bytes read. - if n2 != n { - if debug { + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { println("readSmall: got", n2, "want", n, "err", err) } - return nil + return nil, err } - return r.tmp[:n] + return r.tmp[:n], nil } func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { @@ -110,6 +109,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17f..0e59a242d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..5022e71c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,230 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. 
+ SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
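A minimal usage sketch for the header probe documented above and defined just below; it relies only on the exported zstd.Header type, its Decode method, and the HeaderMaxSize constant from this file, while the input file name and error handling are illustrative assumptions:

	package main

	import (
		"fmt"
		"io"
		"log"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		f, err := os.Open("input.zst") // assumed sample file
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// HeaderMaxSize bytes always covers the frame header plus the first
		// block header; a shorter read may still carry enough information.
		buf := make([]byte, zstd.HeaderMaxSize)
		n, err := io.ReadFull(f, buf)
		if err != nil && err != io.ErrUnexpectedEOF {
			log.Fatal(err)
		}

		var h zstd.Header
		if err := h.Decode(buf[:n]); err != nil {
			log.Fatal(err)
		}
		if h.Skippable {
			fmt.Println("skippable frame, payload bytes:", h.SkippableSize)
			return
		}
		fmt.Println("window:", h.WindowSize, "hasFCS:", h.HasFCS, "contentSize:", h.FrameContentSize)
		if h.FirstBlock.OK {
			fmt.Println("first block compressed:", h.FirstBlock.Compressed)
		}
	}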
+func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index d78be6d42..286c8f9d7 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -6,9 +6,12 @@ package zstd import ( "bytes" - "errors" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -23,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. dicts map[uint32]dict @@ -47,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -82,9 +95,13 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true + if r == nil { + d.current.err = ErrDecoderNilInput + } + // Transfer option dicts. d.dicts = make(map[uint32]dict, len(d.o.dicts)) for _, dc := range d.o.dicts { @@ -110,9 +127,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { // Returns the number of bytes written and any error that occurred. // When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { - if d.stream == nil { - return 0, errors.New("no input has been initialized") - } var n int for { if len(d.current.b) > 0 { @@ -130,12 +144,12 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } if len(d.current.b) > 0 { - if debug { + if debugDecoder { println("returning", n, "still bytes left:", len(d.current.b)) } // Only return error at end of block @@ -144,7 +158,7 @@ func (d *Decoder) Read(p []byte) (int, error) { if d.current.err != nil { d.drainOutput() } - if debug { + if debugDecoder { println("returning", n, d.current.err, len(d.decoders)) } return n, d.current.err @@ -152,28 +166,33 @@ func (d *Decoder) Read(p []byte) (int, error) { // Reset will reset the decoder the supplied stream after the current has finished processing. // Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. 
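A short sketch of the reuse pattern these comments describe: a long-lived decoder is pointed at a new source, drained, and then Reset(nil) drops the reader reference so the source can be collected while the decoder idles. The helper name and package are assumptions; only Decoder, Reset, WriteTo and Close from this package are used:

	package zstdutil // illustrative package name

	import (
		"io"

		"github.com/klauspost/compress/zstd"
	)

	// decompressTo streams one zstd payload from src into w using a long-lived
	// decoder, then releases the reader reference so dec can sit idle in a pool.
	func decompressTo(dec *zstd.Decoder, w io.Writer, src io.Reader) (int64, error) {
		if err := dec.Reset(src); err != nil {
			return 0, err
		}
		n, err := dec.WriteTo(w)
		// Drop the reference to src; dec remains usable via Reset, DecodeAll or Close.
		if rerr := dec.Reset(nil); err == nil {
			err = rerr
		}
		return n, err
	}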
func (d *Decoder) Reset(r io.Reader) error { if d.current.err == ErrDecoderClosed { return d.current.err } - if r == nil { - return errors.New("nil Reader sent as input") - } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } d.drainOutput() - // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { - if debug { + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { + bb2 := bb + if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } - b := bb.Bytes() + b := bb2.Bytes() var dst []byte if cap(d.current.b) > 0 { dst = d.current.b @@ -186,36 +205,48 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.b = dst d.current.err = err d.current.flushed = true - if debug { + if debugDecoder { println("sync decode to", len(dst), "bytes, err:", err) } return nil } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { - if debug { + if debugDecoder { printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) } d.decoders <- d.current.d @@ -226,39 +257,31 @@ func (d *Decoder) drainOutput() { println("current already flushed") return } - for { - select { - case v := <-d.current.output: - if v.d != nil { - if debug { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) } + d.decoders <- v.d } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. // The return value n is the number of bytes written. // Any error encountered during the write is also returned. 
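Because the Decoder implements io.WriterTo, a plain io.Copy takes this path instead of looping over Read; a compact sketch, with file names assumed:

	package main

	import (
		"io"
		"log"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		in, err := os.Open("archive.zst") // assumed input
		if err != nil {
			log.Fatal(err)
		}
		defer in.Close()

		out, err := os.Create("archive.raw") // assumed output
		if err != nil {
			log.Fatal(err)
		}
		defer out.Close()

		dec, err := zstd.NewReader(in)
		if err != nil {
			log.Fatal(err)
		}
		defer dec.Close()

		// io.Copy detects io.WriterTo and calls dec.WriteTo(out) directly.
		if _, err := io.Copy(out, dec); err != nil {
			log.Fatal(err)
		}
	}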
func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - if d.stream == nil { - return 0, errors.New("no input has been initialized") - } var n int64 for { if len(d.current.b) > 0 { n2, err2 := w.Write(d.current.b) n += int64(n2) - if err2 != nil && d.current.err == nil { + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { d.current.err = err2 - break + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite } } if d.current.err != nil { @@ -282,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -290,11 +313,14 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { block := <-d.decoders frame := block.localFrame defer func() { - if debug { + if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -302,40 +328,50 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debug { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. - if uint64(cap(dst)) < frame.FrameContentSize { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { - // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size. - size := frame.WindowSize * 2 + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 // Cap to 1 MB. if size > 1<<20 { size = 1 << 20 } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } dst = make([]byte, 0, size) } @@ -344,7 +380,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { return dst, err } if len(frame.bBuf) == 0 { - if debug { + if debugDecoder { println("frame dbuf empty") } break @@ -359,33 +395,176 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. 
func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debug { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok } + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } - if debug { - println("got", len(d.current.b), "bytes, error:", d.current.err) + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if !d.o.ignoreChecksum && len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if 
d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. // It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -393,10 +572,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -447,100 +626,296 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debug { - println("got new stream") + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debug && err != nil { - println("Frame decoder returned", err) + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + 
do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + var hist history + var hasErr bool + + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block } - break + return } - if debug { - println("starting frame decoder") + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return } - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) } } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqDecode) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 284d38449..c70e6fa0f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,7 +6,6 @@ package zstd import ( "errors" - "fmt" "runtime" ) @@ -18,16 +17,22 @@ type decoderOptions struct { lowMem bool concurrent int maxDecodedSize uint64 + maxWindowSize uint64 dicts []dict + ignoreChecksum bool } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -36,16 +41,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { - return fmt.Errorf("Concurrency must be at least 1") + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n } - o.concurrent = n return nil } } @@ -53,15 +67,14 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// For streaming operations, the maximum window size is capped at 1<<30 bytes. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { return errors.New("WithDecoderMaxMemory must be at least 1") } if n > 1<<63 { - return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") } o.maxDecodedSize = n return nil @@ -82,3 +95,29 @@ func WithDecoderDicts(dicts ...[]byte) DOption { return nil } } + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index fa25a18d8..a36ae83ef 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -82,7 +82,7 @@ func loadDict(b []byte) (*dict, error) { println("Transform table error:", err) return err } - if debug { + if debugDecoder || debugEncoder { println("Read table ok", "symbolLen:", dec.symbolLen) } // Set decoders as predefined so they aren't reused. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index b1b7c6e6a..15ae8ee80 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -7,6 +7,10 @@ import ( "github.com/klauspost/compress/zstd/internal/xxhash" ) +const ( + dictShardBits = 6 +) + type fastBase struct { // cur is the offset at the start of hist cur int32 @@ -17,6 +21,7 @@ type fastBase struct { tmp [8]byte blk *blockEnc lastDictID uint32 + lowMem bool } // CRC returns the underlying CRC writer. @@ -33,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // WindowSize returns the window size of the encoder, // or a window size small enough to contain the input size, if > 0. 
-func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { b := int32(1) << uint(bits.Len(uint(size))) // Keep minimum window. if b < 1024 { @@ -57,15 +62,10 @@ func (e *fastBase) addBlock(src []byte) int32 { // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { if cap(e.hist) == 0 { - l := e.maxMatchOff * 2 - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) + e.ensureHist(len(src)) } else { - if cap(e.hist) < int(e.maxMatchOff*2) { - panic("unexpected buffer size") + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) } // Move down offset := int32(len(e.hist)) - e.maxMatchOff @@ -79,6 +79,28 @@ func (e *fastBase) addBlock(src []byte) int32 { return s } +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + // useBlock will replace the block with the provided one, // but transfer recent offsets from the previous. func (e *fastBase) UseBlock(enc *blockEnc) { @@ -86,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) { e.blk = enc } -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - func (e *fastBase) matchlen(s, t int32, src []byte) int32 { if debugAsserts { if s < 0 { @@ -109,15 +126,30 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } + a := src[s:] + b := src[t:] + b = b[:len(a)] + end := int32((len(a) >> 3) << 3) + for i := int32(0); i < end; i += 8 { + if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { + return i + int32(bits.TrailingZeros64(diff)>>3) + } + } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + a = a[end:] + b = b[end:] + for i := range a { + if a[i] != b[i] { + return int32(i) + end + } + } + return int32(len(a)) + end } // Reset the encoding table. func (e *fastBase) resetBase(d *dict, singleBlock bool) { if e.blk == nil { - e.blk = &blockEnc{} + e.blk = &blockEnc{lowMem: e.lowMem} e.blk.init() } else { e.blk.reset(nil) @@ -128,14 +160,15 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } - if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { - l := e.maxMatchOff*2 + int32(d.DictContentSize()) - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true } - e.hist = make([]byte, 0, l) + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low } + // We offset current position so everything will be out of reach. // If above reset line, history will be purged. 
if e.cur < bufferReset { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 000000000..96028ecd8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,558 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
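The estBits method in the new enc_best.go above prices a candidate match against the predefined FSE tables and against the cost of simply emitting the covered bytes as literals; the literal side is derived from the block's Shannon entropy, scaled by 10 fractional bits and floored at 1 bit per byte, using the exported helper from the parent compress package that the hunk imports. A small sketch of that literal-cost estimate:

package main

import (
	"fmt"

	"github.com/klauspost/compress"
)

// literalBitsPerByte mirrors the estimate used by the best encoder: block
// entropy scaled by 1024, never allowed below 1 bit per byte.
func literalBitsPerByte(src []byte) int32 {
	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
	if bitsPerByte < 1024 {
		bitsPerByte = 1024
	}
	return bitsPerByte
}

func main() {
	src := []byte("abababababababab abababababababab")
	bpb := literalBitsPerByte(src)
	// Approximate literal cost of a 16-byte span, i.e. the savings a match over
	// that span competes against before its own encoding cost is subtracted.
	fmt.Println("bits/byte (x1024):", bpb, "literal bits for 16 bytes:", (16*bpb)>>10)
}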
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b match) match { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, 
matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { + bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) + if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { + bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... 
+ off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 94a5343d0..c769f6941 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -9,6 +9,7 @@ import "fmt" const ( betterLongTableBits = 19 // Bits used in the long match table betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the @@ -16,29 +17,559 @@ const ( // This greatly depends on the type of input. 
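Throughout these encoder hunks the fixed hash5/hash8 helpers are replaced by hashLen(cv, tableBits, hashLen), which hashes only the lowest hashLen bytes of a 64-bit load into a tableBits-wide index. The vendored implementation is not reproduced here; a generic multiply-shift version of the same shape, offered only as an illustration, might look like:

package main

import "fmt"

// hashBytes is an illustrative multiply-shift hash over the low n bytes of v,
// producing a tableBits-wide index. It is not the vendored hashLen function,
// only a sketch of the idea: mask to n bytes, multiply by an odd constant,
// keep the top tableBits bits.
func hashBytes(v uint64, tableBits, n uint) uint32 {
	if n < 8 {
		v &= (uint64(1) << (8 * n)) - 1
	}
	const prime = 0x9E3779B185EBCA87 // 64-bit golden-ratio constant
	return uint32((v * prime) >> (64 - tableBits))
}

func main() {
	cv := uint64(0x0807060504030201)
	fmt.Printf("short (5 bytes into 13 bits): %d\n", hashBytes(cv, 13, 5))
	fmt.Printf("long  (8 bytes into 19 bits): %d\n", hashBytes(cv, 19, 8))
}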
betterShortTableBits = 13 // Bits used in the short match table betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard ) -type prevEntry struct { - offset int32 - prev int32 +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
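The prevEntry tables declared above give each hash bucket an effective depth of two: every store keeps the previous occupant as prev, so both the newest and the second-newest position with that hash remain reachable. A minimal sketch of that insert pattern, matching the assignments used for the long tables in these hunks:

package main

import "fmt"

type prevEntry struct {
	offset int32
	prev   int32
}

// insert stores a new offset in bucket h while keeping the previous occupant
// reachable as prev, giving a "chain" of length 2.
func insert(tbl []prevEntry, h uint32, off int32) {
	tbl[h] = prevEntry{offset: off, prev: tbl[h].offset}
}

func main() {
	tbl := make([]prevEntry, 8)
	insert(tbl, 3, 100)
	insert(tbl, 3, 250)
	fmt.Printf("%+v\n", tbl[3]) // {offset:250 prev:100}
}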
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } } -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry - dictTable []tableEntry - dictLongTable []prevEntry +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) } // Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { const ( // Input margin is the number of bytes we read (8) // and the maximum we will read ahead (2) @@ -56,6 +587,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.longTable[i] = prevEntry{} } e.cur = e.maxMatchOff + e.allDirty = true break } // Shift down everything in the table that isn't already too far away. @@ -88,6 +620,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { prev: v2, } } + e.allDirty = true e.cur = e.maxMatchOff break } @@ -125,7 +658,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
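The sequences emitted in the loops above follow zstd's offset convention in simplified form: seq.offset values 1 to 3 select one of the recent (repeat) offsets, while a fresh match at distance d is stored as d+3. The real format also special-cases litLen == 0, which is why the encoders swap offset1 and offset2 after such a repeat. A toy round-trip of the simplified convention, purely as an illustration:

package main

import "fmt"

// encodeOffset shows the simplified convention visible in the encoders above:
// rep > 0 selects a recent-offset slot directly, otherwise the real distance
// is stored with a +3 bias so the two cases stay distinguishable.
func encodeOffset(distance uint32, rep int32) uint32 {
	if rep > 0 {
		return uint32(rep) // 1, 2 or 3: one of the recent offsets
	}
	return distance + 3
}

func decodeOffset(v uint32, recent [3]uint32) uint32 {
	if v >= 1 && v <= 3 {
		return recent[v-1]
	}
	return v - 3
}

func main() {
	recent := [3]uint32{42, 17, 9}
	fmt.Println(decodeOffset(encodeOffset(100, -1), recent)) // 100
	fmt.Println(decodeOffset(encodeOffset(0, 2), recent))    // 17 (second recent offset)
}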
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -141,8 +674,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -150,7 +683,9 @@ encodeLoop: repIndex := s - offset1 + repOff off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -191,7 +726,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -201,10 +736,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } cv = load6432(src, s) @@ -251,7 +789,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -262,10 +800,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } cv = load6432(src, s) @@ -340,12 +881,13 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur // We can store it, since we have at least a 4 byte match. e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { // Found a long match, at least 8 bytes. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 @@ -398,9 +940,41 @@ encodeLoop: } cv = load6432(src, s) } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. offset2 = offset1 offset1 = s - t @@ -449,10 +1023,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } @@ -470,15 +1047,17 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -507,20 +1086,21 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. 
-func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.Encode(blk, src) +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } } // ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { return @@ -535,10 +1115,10 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { const hashLog = betterShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash5(cv, hashLog) // 0 -> 4 - nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -557,6 +1137,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } // Init or copy dict table @@ -566,7 +1147,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -576,7 +1157,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, @@ -585,11 +1166,72 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } + // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } 
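The dictionary Reset logic in the surrounding enc_better.go hunk avoids recopying whole tables: every table write marks its shard dirty, and on reset only dirty shards are restored from the pristine dictionary copy unless most of the table changed anyway. A compact sketch of the same bookkeeping, with made-up table sizes:

package main

import "fmt"

const (
	tableBits     = 10
	tableSize     = 1 << tableBits
	dictShardBits = 6
	shardCnt      = 1 << (tableBits - dictShardBits) // 16 shards
	shardSize     = tableSize / shardCnt             // 64 entries per shard
)

type shardedTable struct {
	table      [tableSize]int32
	dictTable  [tableSize]int32 // pristine dictionary state
	shardDirty [shardCnt]bool
	allDirty   bool
}

// put writes an entry and remembers which shard it touched.
func (t *shardedTable) put(idx uint32, v int32) {
	t.table[idx] = v
	t.shardDirty[idx/shardSize] = true
}

// reset restores dictionary state, copying whole-table only when enough
// shards are dirty, otherwise just the dirty shards.
func (t *shardedTable) reset() {
	dirty := 0
	for _, d := range t.shardDirty {
		if d {
			dirty++
		}
	}
	if t.allDirty || dirty > shardCnt*4/6 {
		copy(t.table[:], t.dictTable[:])
	} else {
		for i, d := range t.shardDirty {
			if d {
				copy(t.table[i*shardSize:(i+1)*shardSize], t.dictTable[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	for i := range t.shardDirty {
		t.shardDirty[i] = false
	}
	t.allDirty = false
}

func main() {
	var t shardedTable
	t.put(5, 123)
	t.put(700, 456)
	t.reset()
	fmt.Println(t.table[5], t.table[700]) // back to dictionary values (0 0)
}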
+ } + } e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 19eebf66e..7ff0c64fa 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -10,16 +10,28 @@ const ( dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + ) type doubleFastEncoder struct { fastEncoder - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool } // Encode mimmics functionality in zstd_dfast.c @@ -100,7 +112,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -115,8 +127,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -161,7 +173,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -199,7 +211,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -295,16 +307,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -321,8 +333,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -359,7 +371,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -418,7 +430,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -427,8 +439,8 @@ encodeLoop: var t int32 for { - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -474,7 +486,7 @@ encodeLoop: s += length + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -512,7 +524,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -605,16 +617,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -631,8 +643,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv1>>8, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -668,7 +680,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } @@ -678,9 +690,379 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
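Each of these Encode variants opens with the same wraparound guard: once e.cur approaches the overflow threshold, every stored table offset is rebased so positions stay relative to the retained history, and entries older than the match window are cleared. A compact sketch of that rebasing step, with illustrative parameter values:

package main

import "fmt"

// rebase mirrors the shift-down loops above: entries older than the window are
// cleared, newer ones are rewritten relative to a fresh cur of maxMatchOff.
func rebase(offsets []int32, cur, histLen, maxMatchOff int32) {
	minOff := cur + histLen - maxMatchOff
	for i, v := range offsets {
		if v < minOff {
			offsets[i] = 0 // too old to ever match again
		} else {
			offsets[i] = v - cur + maxMatchOff
		}
	}
}

func main() {
	table := []int32{10, 500_000, 1_000_000}
	rebase(table, 1_000_000, 4096, 1<<17)
	fmt.Println(table) // Output: [0 0 131072] (only the most recent entry survives, rebased)
}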
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } @@ -692,22 +1074,51 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: e.maxMatchOff, } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: i, } } } e.lastDictID = d.id + e.allDirty = true } // Reset table to initial state e.cur = e.maxMatchOff - copy(e.longTable[:], e.dictLongTable) + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 0b301df43..f51ab529a 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -6,15 +6,16 @@ package zstd import ( "fmt" - "math" - "math/bits" ) const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 ) type tableEntry struct { @@ -24,8 +25,14 @@ type tableEntry struct { type fastEncoder struct { fastBase - table [tableSize]tableEntry - dictTable []tableEntry + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool } // Encode mimmics functionality in zstd_fast.c @@ -78,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. 
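The dictionary-aware encoders above split each hash table into fixed-size shards, mark a shard dirty whenever an entry is written (markShardDirty / markLongShardDirty), and on Reset restore only the dirty shards from the dictionary snapshot, falling back to a full copy once enough shards have diverged. A minimal standalone sketch of that pattern, with illustrative names (shardedTable, snapshot) that are not part of the zstd package:

package shardsketch

const (
    shardCount = 1 << 5  // number of shards
    shardSize  = 1 << 10 // entries per shard
    tableLen   = shardCount * shardSize
)

type shardedTable struct {
    table    [tableLen]uint32 // working table, mutated while encoding
    snapshot [tableLen]uint32 // pristine copy, e.g. built from a dictionary
    dirty    [shardCount]bool // which shards diverged from the snapshot
}

// set writes an entry and remembers which shard it touched.
func (t *shardedTable) set(i int, v uint32) {
    t.table[i] = v
    t.dirty[i/shardSize] = true
}

// reset restores the table from the snapshot: one whole-table copy when most
// shards are dirty, per-shard copies otherwise.
func (t *shardedTable) reset() {
    dirtyCnt := 0
    for _, d := range t.dirty {
        if d {
            dirtyCnt++
        }
    }
    if dirtyCnt > shardCount/2 {
        copy(t.table[:], t.snapshot[:])
    } else {
        for i, d := range t.dirty {
            if !d {
                continue
            }
            copy(t.table[i*shardSize:(i+1)*shardSize], t.snapshot[i*shardSize:(i+1)*shardSize])
        }
    }
    for i := range t.dirty {
        t.dirty[i] = false
    }
}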
- const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -95,7 +102,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -114,8 +121,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -127,20 +134,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -170,7 +164,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -227,20 +221,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -277,23 +258,10 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -322,7 +290,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -335,7 +303,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { inputMargin = 8 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debug { + if debugEncoder { if len(src) > maxBlockSize { panic("src too big") } @@ -366,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. 
nextEmit := s @@ -383,7 +351,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -397,8 +365,8 @@ encodeLoop: // By not using them for the first 3 matches for { - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -409,21 +377,7 @@ encodeLoop: if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -454,7 +408,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -513,21 +467,7 @@ encodeLoop: panic(fmt.Sprintf("t (%d) < 0 ", t)) } // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -564,24 +504,10 @@ encodeLoop: if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -608,7 +534,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. @@ -617,8 +543,284 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. 
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + // ResetDict will reset and set a dictionary if not nil func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { return @@ -635,9 +837,9 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash6(cv, hashLog) // 0 -> 5 - nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 - nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -653,9 +855,44 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index f5759211d..e6b1d01cf 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -33,7 +33,7 @@ type encoder interface { Block() *blockEnc CRC() *xxhash.Digest AppendCRC([]byte) []byte - WindowSize(size int) int32 + WindowSize(size int64) int32 UseBlock(*blockEnc) Reset(d *dict, singleBlock bool) } @@ -48,6 +48,8 @@ type encoderState struct { err error writeErr error nWritten int64 + nInput int64 + frameContentSize int64 headerWritten bool eofWritten bool fullFrameWritten bool @@ -96,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = 
e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -120,7 +124,21 @@ func (e *Encoder) Reset(w io.Writer) { s.w = w s.err = nil s.nWritten = 0 + s.nInput = 0 s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } } // Write data to the encoder. @@ -176,6 +194,12 @@ func (e *Encoder) nextBlock(final bool) error { } if !s.headerWritten { // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } if final && len(s.filling) > 0 { s.current = e.EncodeAll(s.filling, s.current[:0]) var n2 int @@ -184,6 +208,7 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) s.current = s.current[:0] s.filling = s.filling[:0] s.headerWritten = true @@ -194,8 +219,8 @@ func (e *Encoder) nextBlock(final bool) error { var tmp [maxHeaderSize]byte fh := frameHeader{ - ContentSize: 0, - WindowSize: uint32(s.encoder.WindowSize(0)), + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), SingleSegment: false, Checksum: e.o.crc, DictID: e.o.dict.ID(), @@ -235,11 +260,52 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) s.wg.Add(1) go func(src []byte) { - if debug { + if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) } defer func() { @@ -284,7 +350,7 @@ func (e *Encoder) nextBlock(final bool) error { } switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } blk.encodeRaw(src) @@ -307,7 +373,7 @@ func (e *Encoder) nextBlock(final bool) error { // // The Copy function uses ReaderFrom if available. 
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debug { + if debugEncoder { println("Using ReadFrom") } @@ -330,20 +396,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { switch err { case io.EOF: e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debug { + if debugEncoder { println("ReadFrom: got EOF final block:", len(e.state.filling)) } return n, nil + case nil: default: - if debug { + if debugEncoder { println("ReadFrom: got error:", err) } e.state.err = err return n, err - case nil: } if len(src) > 0 { - if debug { + if debugEncoder { println("ReadFrom: got space left in source:", len(src)) } continue @@ -388,6 +454,11 @@ func (e *Encoder) Close() error { if err != nil { return err } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } if e.state.fullFrameWritten { return s.err } @@ -464,14 +535,14 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } fh := frameHeader{ ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(len(src))), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), SingleSegment: single, Checksum: e.o.crc, DictID: e.o.dict.ID(), } // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } dst, err := fh.appendTo(dst) @@ -480,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { @@ -506,7 +577,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, src) @@ -542,7 +613,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, todo) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 1209915bc..44d8dbd19 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -24,31 +24,45 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + customBlockSize bool + lowMem bool dict *dict } func (o *encoderOptions) setDefault() { *o = encoderOptions{ - // use less ram: true for now, but may change. concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, - blockSize: 1 << 16, + blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, + lowMem: false, } } // encoder returns an encoder with the selected options. 
func (o encoderOptions) encoder() encoder { switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedDefault: - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} case SpeedBetterCompression: - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} - case SpeedFastest: - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -60,8 +74,9 @@ func WithEncoderCRC(b bool) EOption { } // WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. +// meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { @@ -93,6 +108,7 @@ func WithWindowSize(n int) EOption { o.customWindow = true if o.blockSize > o.windowSize { o.blockSize = o.windowSize + o.customBlockSize = true } return nil } @@ -143,20 +159,20 @@ const ( // By using this, notice that CPU usage may go up in the future. SpeedBetterCompression + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + // speedLast should be kept as the last actual compression option. // The is not for external usage, but is used to keep track of the valid options. speedLast - - // SpeedBestCompression will choose the best available compression option. - // For now this is not implemented. - SpeedBestCompression = SpeedBetterCompression ) // EncoderLevelFromString will convert a string representation of an encoding level back // to a compression level. The compare is not case sensitive. // If the string wasn't recognized, (false, SpeedDefault) will be returned. func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ { + for l := speedNotSet + 1; l < speedLast; l++ { if strings.EqualFold(s, l.String()) { return true, l } @@ -173,10 +189,11 @@ func EncoderLevelFromZstd(level int) EncoderLevel { return SpeedFastest case level >= 3 && level < 6: return SpeedDefault - case level > 5: + case level >= 6 && level < 10: return SpeedBetterCompression + default: + return SpeedBestCompression } - return SpeedDefault } // String provides a string representation of the compression level. 
@@ -188,6 +205,8 @@ func (e EncoderLevel) String() string { return "default" case SpeedBetterCompression: return "better" + case SpeedBestCompression: + return "best" default: return "invalid" } @@ -205,10 +224,15 @@ func WithEncoderLevel(l EncoderLevel) EOption { switch o.level { case SpeedFastest: o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 } } if !o.customALEntropy { @@ -268,6 +292,17 @@ func WithSingleSegment(b bool) EOption { } } +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + // WithEncoderDict allows to register a dictionary that will be used for the encode. // The encoder *may* choose to use no dictionary instead for certain payloads. func WithEncoderDict(dict []byte) EOption { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index fc4a566d3..fa0a633f3 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,27 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // maxWindowSize is the maximum windows size to support. - // should never be bigger than max-int. - maxWindowSize uint64 - - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -38,20 +28,18 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( - // The minimum Window_Size is 1 KB. + // MinWindowSize is the minimum Window Size, which is 1 KB. MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. MaxWindowSize = 1 << 29 ) @@ -61,12 +49,11 @@ var ( ) func newFrameDec(o decoderOptions) *frameDec { - d := frameDec{ - o: o, - maxWindowSize: MaxWindowSize, + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize } - if d.maxWindowSize > o.maxDecodedSize { - d.maxWindowSize = o.maxDecodedSize + d := frameDec{ + o: o, } return &d } @@ -78,50 +65,74 @@ func newFrameDec(o decoderOptions) *frameDec { func (d *frameDec) reset(br byteBuffer) error { d.HasCheckSum = false d.WindowSize = 0 - var b []byte + var signature [4]byte for { - b = br.readSmall(4) - if b == nil { + var err error + // Check if we can read more... 
+ b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: return io.EOF + default: + return err + case nil: + copy(signature[1:], b) } - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - if debug { - println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) } // Break if not skippable frame. break } // Read size to skip - b = br.readSmall(4) - if b == nil { - println("Reading Frame Size EOF") - return io.ErrUnexpectedEOF + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err := br.skipN(int(n)) + err = br.skipN(int(n)) if err != nil { - if debug { + if debugDecoder { println("Reading discarded frame", err) } return err } } - if !bytes.Equal(b, frameMagic) { - println("Got magic numbers: ", b, "want:", frameMagic) + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } return ErrMagicMismatch } // Read Frame_Header_Descriptor fhd, err := br.readByte() if err != nil { - println("Reading Frame_Header_Descriptor", err) + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } return err } d.SingleSegment = fhd&(1<<5) != 0 if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") + return errors.New("reserved bit set on frame header") } // Read Window_Descriptor @@ -130,7 +141,9 @@ func (d *frameDec) reset(br byteBuffer) error { if !d.SingleSegment { wd, err := br.readByte() if err != nil { - println("Reading Window_Descriptor", err) + if debugDecoder { + println("Reading Window_Descriptor", err) + } return err } printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) @@ -147,12 +160,11 @@ func (d *frameDec) reset(br byteBuffer) error { if size == 3 { size = 4 } - b = br.readSmall(int(size)) - if b == nil { - if debug { - println("Reading Dictionary_ID", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err } var id uint32 switch size { @@ -163,7 +175,7 @@ func (d *frameDec) reset(br byteBuffer) error { case 4: id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) } - if debug { + if debugDecoder { println("Dict size", size, "ID:", id) } if id > 0 { @@ -185,12 +197,12 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { - b := br.readSmall(fcsSize) - if b == nil { - println("Reading Frame content", io.ErrUnexpectedEOF) - return io.ErrUnexpectedEOF + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err } switch fcsSize { case 1: @@ -205,10 +217,11 @@ func (d *frameDec) reset(br byteBuffer) error { d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) d.FrameContentSize = uint64(d1) | (uint64(d2) 
<< 32) } - if debug { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + if debugDecoder { + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -226,21 +239,32 @@ func (d *frameDec) reset(br byteBuffer) error { } } - if d.WindowSize > d.maxWindowSize { - printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + if d.WindowSize > uint64(d.o.maxWindowSize) { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } return ErrWindowSizeExceeded } // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { - println("got window size: ", d.WindowSize) + if debugDecoder { + println("got window size: ", d.WindowSize) + } return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or very small window size. + d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + // Alloc with one additional block + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -248,56 +272,37 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { - if debug { - printf("decoding new block %p:%p", block, block.data) + if debugDecoder { + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debug { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } + + // We can overwrite upper tmp now + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + var tmp [4]byte got := d.crc.Sum64() // Flip to match file order. 
@@ -306,142 +311,29 @@ func (d *frameDec) checkCRC() error { tmp[2] = byte(got >> 16) tmp[3] = byte(got >> 24) - // We can overwrite upper tmp now - want := d.rawInput.readSmall(4) - if want == nil { - println("CRC missing?") - return io.ErrUnexpectedEOF - } - if !bytes.Equal(tmp[:], want) { - if debug { + if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } return ErrCRCMismatch } - if debug { + if debugDecoder { println("CRC ok", tmp[:]) } return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 10MB. - d.history.maxSize = d.history.windowSize + maxBlockSize*5 - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debug { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debug { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debug { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debug { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. 
+func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } // runDecoder will create a sync decoder that will decode a block of data. @@ -450,41 +342,67 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) if err != nil { break } - if debug { + if debugDecoder { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index e6d3d49b3..23333b969 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. 
+ // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +229,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -352,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index aa9eba88b..ab26326a8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -62,9 +62,8 @@ func (s symbolTransform) String() string { // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. 
-// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. @@ -77,27 +76,12 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < int(tableSize) { + if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] @@ -202,13 +186,13 @@ func (s *fseEncoder) buildCTable() error { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int16(total - 1) + symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int16(total - v) + symbolTT[i].deltaFindState = total - v total += v } } @@ -229,7 +213,7 @@ func (s *fseEncoder) setRLE(val byte) { deltaFindState: 0, deltaNbBits: 0, } - if debug { + if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val @@ -353,8 +337,8 @@ func (s *fseEncoder) normalizeCount2(length int) error { distributed uint32 total = uint32(length) tableLog = s.actualTableLog - lowThreshold = uint32(total >> tableLog) - lowOne = uint32((total * 3) >> (tableLog + 1)) + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { @@ -379,7 +363,7 @@ func (s *fseEncoder) normalizeCount2(length int) error { if (total / toDistribute) > lowOne { // risk of rounding to zero - lowOne = uint32((total * 3) / (toDistribute * 2)) + lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 @@ -708,15 +692,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] - return -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] } // flush will write the tablelog to the output and flush the remaining full bytes. 
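Histogram now returns a pointer to the fixed 256-entry count array, so callers fill it in place and then call HistogramFinished with the highest populated symbol and the largest count, as the comment above describes. A small sketch of that calling pattern against a plain *[256]uint32; the real method lives on the unexported fseEncoder type, so the function name here is illustrative only:

package histsketch

// fillHistogram counts byte frequencies into the caller-supplied array and
// reports the values HistogramFinished expects: the highest populated symbol
// and the size of the most populated bucket.
func fillHistogram(count *[256]uint32, data []byte) (maxSymbol uint8, maxCount int) {
    for _, b := range data {
        count[b]++
    }
    for i, c := range count {
        if c == 0 {
            continue
        }
        maxSymbol = uint8(i)
        if int(c) > maxCount {
            maxCount = int(c)
        }
    }
    return maxSymbol, maxCount
}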
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go index 6c17dc17f..474cb77d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -59,7 +59,7 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) { } for i, bit := range bits { if base > math.MaxInt32 { - panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + panic("invalid decoding table, base overflows int32") } dst[i] = baseOffset{ diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index 4a752067f..5d73c21eb 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -13,65 +13,23 @@ const ( prime8bytes = 0xcf1bbcdcb7a56463 ) -// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. -// l must be >=4 and <=8. Any other value will return hash for 4 bytes. -// h should always be <32. -// Preferably h and l should be a constant. -// FIXME: This does NOT get resolved, if 'mls' is constant, -// so this cannot be used. -func hashLen(u uint64, hashLog, mls uint8) uint32 { +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) case 5: - return hash5(u, hashLog) + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) case 6: - return hash6(u, hashLog) + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) case 7: - return hash7(u, hashLog) + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) case 8: - return hash8(u, hashLog) + return uint32((u * prime8bytes) >> (64 - length)) default: - return hash4x64(u, hashLog) + return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) -} - -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash5(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. 
-// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d2..28b40153c 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) 
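The history struct above is now organized around its role as the decoder's sliding window, and the new ensureBlock slides data down so only windowSize bytes are retained between blocks. An illustrative, self-contained sketch of that keep-only-the-last-window behaviour (slidingHistory and the 8-byte window are hypothetical):

package main

import "fmt"

// slidingHistory keeps only the most recent window bytes, reusing its buffer,
// in the spirit of history.append and history.ensureBlock in the hunk above.
type slidingHistory struct {
	window int
	b      []byte
}

func (h *slidingHistory) append(p []byte) {
	if len(p) >= h.window {
		// The new data alone fills the window; keep only its tail.
		h.b = append(h.b[:0], p[len(p)-h.window:]...)
		return
	}
	h.b = append(h.b, p...)
	if len(h.b) > h.window {
		// Slide: move the newest window bytes to the front and truncate.
		// copy has memmove semantics, so the overlapping move is safe.
		discard := len(h.b) - h.window
		copy(h.b, h.b[discard:])
		h.b = h.b[:h.window]
	}
}

func main() {
	h := slidingHistory{window: 8}
	h.append([]byte("hello, "))
	h.append([]byte("zstd world"))
	fmt.Printf("%q\n", h.b) // "td world": only the last 8 bytes survive
}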
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 426b9cac7..2c112a0ab 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index 2c9c5357a..cea178561 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,12 +1,13 @@ // +build !appengine // +build gc // +build !purego +// +build !noasm #include "textflag.h" // Register allocation: // AX h -// CX pointer to advance through b +// SI pointer to advance through b // DX n // BX loop end // R8 v1, k1 @@ -16,39 +17,39 @@ // R12 tmp // R13 prime1v // R14 prime2v -// R15 prime4v +// DI prime4v -// round reads from and advances the buffer pointer in CX. +// round reads from and advances the buffer pointer in SI. // It assumes that R13 has prime1v and R14 has prime2v. #define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ IMULQ R14, R12 \ ADDQ R12, r \ ROLQ $31, r \ IMULQ R13, r // mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define mergeRound(acc, val) \ IMULQ R14, val \ ROLQ $31, val \ IMULQ R13, val \ XORQ val, acc \ IMULQ R13, acc \ - ADDQ R15, acc + ADDQ DI, acc // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT, $0-32 // Load fixed primes. MOVQ ·prime1v(SB), R13 MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·prime4v(SB), DI // Load slice. - MOVQ b_base+0(FP), CX + MOVQ b_base+0(FP), SI MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX // The first loop limit will be len(b)-32. SUBQ $32, BX @@ -65,14 +66,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32 XORQ R11, R11 SUBQ R13, R11 - // Loop until CX > BX. + // Loop until SI > BX. blockLoop: round(R8) round(R9) round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop MOVQ R8, AX @@ -100,16 +101,16 @@ noBlocks: afterBlocks: ADDQ DX, AX - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, BX - CMPQ CX, BX + CMPQ SI, BX JG fourByte wordLoop: // Calculate k1. 
- MOVQ (CX), R8 - ADDQ $8, CX + MOVQ (SI), R8 + ADDQ $8, SI IMULQ R14, R8 ROLQ $31, R8 IMULQ R13, R8 @@ -117,18 +118,18 @@ wordLoop: XORQ R8, AX ROLQ $27, AX IMULQ R13, AX - ADDQ R15, AX + ADDQ DI, AX - CMPQ CX, BX + CMPQ SI, BX JLE wordLoop fourByte: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JG singles - MOVL (CX), R8 - ADDQ $4, CX + MOVL (SI), R8 + ADDQ $4, SI IMULQ R13, R8 XORQ R8, AX @@ -138,19 +139,19 @@ fourByte: singles: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JGE finalize singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX + MOVBQZX (SI), R12 + ADDQ $1, SI IMULQ ·prime5v(SB), R12 XORQ R12, AX ROLQ $11, AX IMULQ R13, AX - CMPQ CX, BX + CMPQ SI, BX JL singlesLoop finalize: @@ -179,13 +180,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40 MOVQ ·prime2v(SB), R14 // Load slice. - MOVQ arg1_base+8(FP), CX - MOVQ arg1_len+16(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX SUBQ $32, BX // Load vN from d. - MOVQ arg+0(FP), AX + MOVQ d+0(FP), AX MOVQ 0(AX), R8 // v1 MOVQ 8(AX), R9 // v2 MOVQ 16(AX), R10 // v3 @@ -199,7 +200,7 @@ blockLoop: round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop // Copy vN back to d. @@ -208,8 +209,8 @@ blockLoop: MOVQ R10, 16(AX) MOVQ R11, 24(AX) - // The number of bytes written is CX minus the old base pointer. - SUBQ arg1_base+8(FP), CX - MOVQ CX, ret+32(FP) + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..4d64a17d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,186 @@ +// +build gc,!purego,!noasm + +#include "textflag.h" + +// Register allocation. +#define digest R1 +#define h R2 // Return value. +#define p R3 // Input pointer. +#define len R4 +#define nblocks R5 // len / 32. +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc \ + +// x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x \ + +#define mergeRound(x) \ + round0(x) \ + EOR x, h \ + MADD h, prime4, prime1, h \ + +// Update v[1-4] with 32-byte blocks. Assumes len >= 32. +#define blocksLoop() \ + LSR $5, len, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 32(p), (x1, x2) \ + round(v1, x1) \ + LDP -16(p), (x3, x4) \ + round(v2, x2) \ + SUB $1, nblocks \ + round(v3, x3) \ + round(v4, x4) \ + CBNZ nblocks, loop \ + +// The primes are repeated here to ensure that they're stored +// in a contiguous array, so we can load them with LDP. 
+DATA primes<> +0(SB)/8, $11400714785074694791 +DATA primes<> +8(SB)/8, $14029467366897019727 +DATA primes<>+16(SB)/8, $1609587929392839161 +DATA primes<>+24(SB)/8, $9650029242287828579 +DATA primes<>+32(SB)/8, $2870177450012600261 +GLOBL primes<>(SB), NOPTR+RODATA, $40 + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 + LDP b_base+0(FP), (p, len) + + LDP primes<> +0(SB), (prime1, prime2) + LDP primes<>+16(SB), (prime3, prime4) + MOVD primes<>+32(SB), prime5 + + CMP $32, len + CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } + BLO afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blocksLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(v1) + mergeRound(v2) + mergeRound(v3) + mergeRound(v4) + +afterLoop: + ADD len, h + + TBZ $4, len, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, len, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, len, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, len, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h + MUL prime1, h + +try1: + TBZ $0, len, end + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h + MUL prime1, h + +end: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +// +// Assumes len(b) >= 32. +TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 + LDP primes<>(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, len) + + blocksLoop() + + // Store updated state. 
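The new xxhash_arm64.s above ports Sum64 and writeBlocks to arm64, so both 64-bit platforms now get the assembly fast path. For reference, a minimal usage sketch against the upstream module github.com/cespare/xxhash/v2, which this vendored internal copy tracks (the internal copy itself is not importable):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("the quick brown fox")

	// One-shot hashing: this is the entry point backed by the assembly Sum64.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming: the Digest buffers input and hands full 32-byte blocks to
	// writeBlocks internally; the result matches the one-shot hash.
	d := xxhash.New()
	d.Write(data[:9])
	d.Write(data[9:])
	fmt.Printf("%016x\n", d.Sum64())
}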
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, len + MOVD len, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go similarity index 54% rename from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 35318d7c4..1a1fac9c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -1,6 +1,9 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego +// +build !noasm package xxhash @@ -10,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(*Digest, []byte) int +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 4a5a82160..209cb4a99 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index b5c8ef133..df0447203 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,127 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. 
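In the execute body above, a match whose offset is smaller than its length overlaps the bytes it is still producing, so the code copies one byte at a time instead of using copy (memmove semantics would not repeat the freshly written bytes). A standalone sketch of why that matters for LZ-style decoding (matchCopy is a hypothetical name):

package main

import "fmt"

// matchCopy appends ml bytes taken from mo bytes back in out, byte by byte,
// so a match that overlaps its own output repeats the pattern (RLE-style),
// which is exactly what a decompressor needs when the offset is shorter than
// the match length.
func matchCopy(out []byte, mo, ml int) []byte {
	start := len(out) - mo
	for i := 0; i < ml; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = matchCopy(out, 1, 4) // offset 1, length 4: repeat the last byte
	fmt.Println(string(out))   // "abbbbb"

	out2 := []byte("ab")
	out2 = matchCopy(out2, 2, 4) // offset 2, length 4: repeat "ab"
	fmt.Println(string(out2))    // "ababab"
}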
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +272,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,44 +297,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } - if size > cap(s.out) { - // Not enough size, will be extremely rarely triggered, + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. - // We add maxBlockSize to the capacity. - s.out = append(s.out, make([]byte, maxBlockSize)...) - s.out = s.out[:len(s.out)-maxBlockSize] + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -224,26 +350,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. 
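The capacity check above grows out by a clamped amount with append(make(...)) and immediately re-slices back to the previous length; the same grow-capacity-but-keep-length idiom appears in execute and executeSimple as well. A small illustrative sketch of the idiom (ensureCap is a hypothetical helper):

package main

import "fmt"

// ensureCap grows b so that at least extra more bytes fit without another
// allocation, while leaving len(b) unchanged.
func ensureCap(b []byte, extra int) []byte {
	if len(b)+extra <= cap(b) {
		return b
	}
	n := len(b)
	b = append(b, make([]byte, extra)...) // may reallocate and copy
	return b[:n]                          // restore the original length
}

func main() {
	out := make([]byte, 3, 4)
	out = ensureCap(out, 100)
	fmt.Println(len(out), cap(out) >= 103) // 3 true
}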
- start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -257,7 +382,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -271,7 +395,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { mlState = mlTable[mlState.newState()&maxTableMask] ofState = ofTable[ofState.newState()&maxTableMask] } else { - bits := br.getBitsFast(nBits) + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -284,19 +409,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) + // Add final literals + s.out = append(out, s.literals...) + return br.close() } var bitMask [16]uint16 @@ -307,87 +427,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. - a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. 
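Both the inlined branch in decodeSync above and the surviving adjustOffset helper implement zstd's repeat-offset rule (RFC 8878): offset values 1-3 name the three most recently used offsets, shifted by one when the sequence carries no literals, while larger values encode offset-3 directly. An illustrative sketch of that bookkeeping, written against the 1-based RFC values (the vendored code uses a 0-based variant; repeatOffsets and resolve are hypothetical names):

package main

import "fmt"

// repeatOffsets tracks zstd's three most recently used match offsets.
type repeatOffsets struct {
	prev [3]int
}

// resolve converts a decoded offset value into a real offset and updates the
// recent-offset history, following the same branches as adjustOffset above.
func (r *repeatOffsets) resolve(offsetValue, litLen int) int {
	if offsetValue > 3 {
		// Not a repeat code: the real offset is offsetValue-3; push it.
		off := offsetValue - 3
		r.prev[2], r.prev[1], r.prev[0] = r.prev[1], r.prev[0], off
		return off
	}
	if litLen == 0 {
		// With no literals, repeat codes shift by one: 1 means repeat offset 2,
		// 2 means repeat offset 3, 3 means repeat offset 1 minus one.
		offsetValue++
	}
	if offsetValue == 1 {
		return r.prev[0] // most recent offset; history unchanged
	}
	var off int
	switch offsetValue {
	case 2:
		off = r.prev[1]
	case 3:
		off = r.prev[2]
	case 4:
		off = r.prev[0] - 1
	}
	if off == 0 {
		off = 1 // corrupt-input guard, as in the hunk above
	}
	// Rotate the selected offset to the front.
	if offsetValue != 2 {
		r.prev[2] = r.prev[1]
	}
	r.prev[1] = r.prev[0]
	r.prev[0] = off
	return off
}

func main() {
	r := repeatOffsets{prev: [3]int{1, 4, 8}} // starting values, as set by history.reset above
	fmt.Println(r.resolve(2, 5))  // 4: repeat offset 2
	fmt.Println(r.resolve(1, 0))  // 1: with no literals, value 1 selects repeat offset 2
	fmt.Println(r.resolve(13, 7)) // 10: an explicit offset, 13 - 3
}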
- br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() @@ -450,36 +489,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..847b322ae --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,362 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. 
+//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := 
ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) + if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
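decodeSyncSimple and decode above select between the plain and BMI2 assembly bodies at run time via the internal cpuinfo package. A standalone sketch of the same dispatch shape, using golang.org/x/sys/cpu as a stand-in for that internal package (sum, sumBMI2 and sumGeneric are hypothetical; the BMI2 variant here simply calls the portable code so the example runs anywhere):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// sumGeneric is the portable fallback implementation.
func sumGeneric(b []byte) int {
	s := 0
	for _, v := range b {
		s += int(v)
	}
	return s
}

// sumBMI2 stands in for an assembly routine that requires BMI2; here it just
// delegates to the generic code so the sketch stays portable.
func sumBMI2(b []byte) int { return sumGeneric(b) }

// sum picks an implementation at run time, the same shape as the
// cpuinfo.HasBMI2() dispatch in decodeSyncSimple and decode above.
func sum(b []byte) int {
	if cpu.X86.HasBMI2 {
		return sumBMI2(b)
	}
	return sumGeneric(b)
}

func main() {
	fmt.Println("BMI2:", cpu.X86.HasBMI2, "sum:", sum([]byte{1, 2, 3}))
}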
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..212c6cac3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,3689 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_adjust_end + 
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_56_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA 
sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + 
CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JGE copy_all_from_history + XORQ R11, R11 + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(R11*1), R12 + MOVB R12, (BX)(R11*1) + ADDQ $0x01, R11 + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(R11*1), R12 + MOVW R12, (BX)(R11*1) + ADDQ $0x02, R11 + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(R11*1), R12 + MOVL R12, (BX)(R11*1) + ADDQ $0x04, R11 + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(R11*1), R12 + MOVQ R12, (BX)(R11*1) + ADDQ $0x08, R11 + JMP copy_4_test + +copy_4: + MOVUPS (R14)(R11*1), X0 + MOVUPS X0, (BX)(R11*1) + ADDQ $0x10, R11 + +copy_4_test: + CMPQ R11, R13 + JB copy_4 + ADDQ R13, DI + ADDQ R13, BX + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB 
main_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, R11 + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (BX)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, R11 + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (BX)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, R11 + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (BX)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, R11 + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (BX)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (BX)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, R11 + JB copy_5 + ADDQ R11, BX + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, R11 + JZ copy_1_word + MOVB (SI)(R14*1), R15 + MOVB R15, (BX)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, R11 + JZ copy_1_dword + MOVW (SI)(R14*1), R15 + MOVW R15, (BX)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, R11 + JZ copy_1_qword + MOVL (SI)(R14*1), R15 + MOVL R15, (BX)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, R11 + JZ copy_1_test + MOVQ (SI)(R14*1), R15 + MOVQ R15, (BX)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JGE copy_all_from_history + XORQ R11, R11 
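+	// Safe copy (descriptive note): the 1/2/4/8-byte remainders of the length are
+	// handled first, then the 16-byte MOVUPS loop takes over, so nothing beyond
+	// the requested match length is written; the non-safe variant instead
+	// over-copies in 16-byte strides within the buffer's spare capacity.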
+ TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(R11*1), R12 + MOVB R12, (BX)(R11*1) + ADDQ $0x01, R11 + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(R11*1), R12 + MOVW R12, (BX)(R11*1) + ADDQ $0x02, R11 + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(R11*1), R12 + MOVL R12, (BX)(R11*1) + ADDQ $0x04, R11 + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(R11*1), R12 + MOVQ R12, (BX)(R11*1) + ADDQ $0x08, R11 + JMP copy_4_test + +copy_4: + MOVUPS (R14)(R11*1), X0 + MOVUPS X0, (BX)(R11*1) + ADDQ $0x10, R11 + +copy_4_test: + CMPQ R11, R13 + JB copy_4 + ADDQ R13, DI + ADDQ R13, BX + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, R11 + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (BX)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, R11 + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (BX)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, R11 + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (BX)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, R11 + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (BX)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (BX)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, R11 + JB copy_5 + ADDQ R11, BX + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + XORQ R12, R12 + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (R11)(R12*1), R14 + MOVB R14, (BX)(R12*1) + ADDQ $0x01, R12 + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (R11)(R12*1), R14 + MOVW R14, (BX)(R12*1) + ADDQ $0x02, R12 + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (R11)(R12*1), R14 + MOVL R14, (BX)(R12*1) + ADDQ $0x04, R12 + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (R11)(R12*1), R14 + MOVQ R14, (BX)(R12*1) + ADDQ $0x08, R12 + JMP copy_2_test + +copy_2: + MOVUPS (R11)(R12*1), X0 + MOVUPS X0, (BX)(R12*1) + ADDQ $0x10, R12 + +copy_2_test: + CMPQ R12, R13 + JB copy_2 + ADDQ R13, BX + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 
200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 
+ SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX + MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ 
copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
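+	// Descriptive note: the decodeSync variants decode and execute in one pass.
+	// After the offset, match length and literal length are read below, the
+	// literals and the match are copied straight into s.out inside this same loop,
+	// unlike the decode-only variants above, which only emit sequence values.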
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_end + 
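+	// Repeat-offset resolution (descriptive note): codes 1-3 index the recent
+	// offsets in s.prevOffset; a literal length of 0 shifts the codes by one
+	// (handled just above), code 3 resolves to prevOffset[0]-1, and a resolved
+	// offset of 0 marks corrupt input and is forced to 1.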
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), 
X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ 
s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX 
+ MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + ADDQ $0x01, CX + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (AX)(CX*1), R14 + MOVW R14, (R10)(CX*1) + ADDQ $0x02, CX + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (AX)(CX*1), R14 + MOVL R14, (R10)(CX*1) + ADDQ $0x04, CX + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (AX)(CX*1), R14 + MOVQ R14, (R10)(CX*1) + ADDQ $0x08, CX + JMP copy_2_test + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + +copy_2_test: + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT 
·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ 
$0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + 
+copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + XORQ R12, R12 + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + ADDQ $0x01, R12 + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (CX)(R12*1), R14 + MOVW R14, (R9)(R12*1) + ADDQ $0x02, R12 + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (CX)(R12*1), R14 + MOVL R14, (R9)(R12*1) + ADDQ $0x04, R12 + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (CX)(R12*1), R14 + MOVQ R14, (R9)(R12*1) + ADDQ $0x08, R12 + JMP copy_2_test + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + +copy_2_test: + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ 
$0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..c3452bc3a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. 
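+		// Validate the decoded sequence before recording it: a zero offset with a
+		// non-zero match length, an oversized match, output beyond the block limit,
+		// or consuming more literals than remain all indicate corrupt input.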
+ if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. 
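The overlapping branch above copies one byte at a time because Go's built-in copy handles overlapping slices as if the source were read first (memmove-like), so it would not re-read the bytes it has just written, which is exactly what an LZ-style overlapping match relies on. A minimal illustration with made-up values:

package main

import "fmt"

func main() {
	byByte := []byte("abc....")
	withCopy := []byte("abc....")
	t, off, ml := 3, 3, 4 // write 4 bytes, each taken 3 positions back

	for i := 0; i < ml; i++ {
		byByte[t+i] = byByte[t-off+i] // sees bytes written earlier in this loop
	}
	copy(withCopy[t:t+ml], withCopy[t-off:t-off+ml]) // overlap-safe, memmove-like

	fmt.Println(string(byByte))   // abcabca: the run is extended
	fmt.Println(string(withCopy)) // abcabc.: the last byte comes from the old data
}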
+ for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 36bcc3cc0..8014174a7 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -35,7 +35,6 @@ func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { // Ensure we cannot reuse by accident prevEnc := *prev prevEnc.symbolLen = 0 - return } compareSwap(ll, &s.llEnc, &s.llPrev) compareSwap(ml, &s.mlEnc, &s.mlPrev) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 841fd95ac..9e1baad73 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -11,7 +11,7 @@ import ( "io" "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/snappy" + snappy "github.com/klauspost/compress/internal/snapref" ) const ( @@ -185,7 +185,6 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.block.reset(nil) r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) if err != nil { - println("snappy.Decode:", err) return written, err } err = r.block.encodeLits(r.block.literals, false) @@ -204,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { written += int64(n) continue case chunkTypeUncompressedData: - if debug { + if debugEncoder { println("Uncompressed, chunklen", chunkLen) } // Section 4.3. Uncompressed data (chunk type 0x01). @@ -247,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { continue case chunkTypeStreamIdentifier: - if debug { + if debugEncoder { println("stream id", chunkLen, len(snappyMagicBody)) } // Section 4.1. Stream identifier (chunk type 0xff). @@ -417,7 +416,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli) // https://github.com/google/snappy/blob/master/framing_format.txt func snappyCRC(b []byte) uint32 { c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 + return c>>15 | c<<17 + 0xa282ead8 } // snappyDecodedLen returns the length of the decoded block and the number of bytes diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..29c15c8c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. 
+var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0807719c8..3eb3f1c82 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,8 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "log" "math" @@ -13,6 +15,12 @@ import ( // enable debug printing const debug = false +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + // Enable extra assertions. 
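The zip.go additions above are documented as pluggable into zip libraries; a hedged usage sketch against the standard archive/zip package (file name and payload are invented for illustration):

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Write a zip entry compressed with zstd under the WinZip method ID.
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello zstd-in-zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back; the decompressor must be registered on the reader as well.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	log.Println(string(data))
}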
const debugAsserts = debug || false @@ -31,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -44,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -67,37 +82,34 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") // ErrDecoderClosed will be returned if the Decoder was used after // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") ) func println(a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Println(a...) } } func printf(format string, a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Printf(format, a...) } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - // matchLen returns the maximum length. // a must be the shortest of the two. // The function also returns whether all bytes matched. @@ -121,24 +133,20 @@ func matchLen(a, b []byte) int { } func load3232(b []byte, i int32) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return binary.LittleEndian.Uint32(b[i:]) } func load6432(b []byte, i int32) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } func load64(b []byte, i int) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
- b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int } + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 98db8f060..000000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index 56729a92c..ca0483711 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,8 +1,8 @@ # go-colorable -[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) Colorable writer for windows. diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 0b0aef837..416d1bbbf 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable @@ -27,3 +28,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 3fb771dcc..766d94603 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable @@ -28,3 +28,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 1bd628f25..1846ad5ab 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -10,6 +10,7 @@ import ( "os" "strconv" "strings" + "sync" "syscall" "unsafe" @@ -27,6 +28,9 @@ const ( backgroundRed = 0x40 backgroundIntensity = 0x80 backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 ) const ( @@ -78,6 +82,8 @@ var ( procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) @@ -89,6 +95,7 @@ type Writer struct { oldattr word oldpos coord rest bytes.Buffer + mutex sync.Mutex } // NewColorable returns new instance of Writer which handles escape sequence from File. @@ -98,6 +105,10 @@ func NewColorable(file *os.File) io.Writer { } if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) @@ -424,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) { // Write writes data on console func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) @@ -439,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ -675,14 +692,19 @@ loop: switch { case n == 0 || n == 100: attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case n == 22 || n == 25: + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore case 30 <= n && n <= 37: attr &= backgroundMask if (n-30)&1 != 0 { @@ -701,7 +723,7 @@ loop: n256setup() } attr &= backgroundMask - attr |= 
n256foreAttr[n256] + attr |= n256foreAttr[n256%len(n256foreAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { @@ -743,7 +765,7 @@ loop: n256setup() } attr &= foregroundMask - attr |= n256backAttr[n256] + attr |= n256backAttr[n256%len(n256backAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { @@ -1003,3 +1025,23 @@ func n256setup() { n256backAttr[i] = c.backgroundAttr() } } + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod index ef3ca9d4c..27351c027 100644 --- a/vendor/github.com/mattn/go-colorable/go.mod +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -1,3 +1,8 @@ module github.com/mattn/go-colorable -require github.com/mattn/go-isatty v0.0.8 +require ( + github.com/mattn/go-isatty v0.0.14 + golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum index 2c12960ec..40c33b333 100644 --- a/vendor/github.com/mattn/go-colorable/go.sum +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -1,4 +1,5 @@ -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 95f2c6be2..05d6f74bf 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ -38,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -47,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml deleted file mode 100644 index 5597e026d..000000000 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -go: - - tip - -os: - - linux - - osx - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 1e69004bb..38418353e 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index a8ddf404f..c9a20b7f3 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -1,5 +1,5 @@ module github.com/mattn/go-isatty -require golang.org/x/sys v0.0.0-20191008105621-543471e840be +go 1.12 -go 1.14 +require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index c141fc53a..912e29cbc 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,4 +1,2 @@ -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be 
h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go deleted file mode 100644 index d3567cb5b..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_android.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build android - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 07e93039d..39bbcf00f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,20 +1,15 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine // +build darwin freebsd openbsd netbsd dragonfly // +build !appengine package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index ff714a376..31503226f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,4 +1,5 @@ -// +build appengine js nacl +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go index bc0a70920..bae7f9bb3 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package isatty @@ -8,7 +9,7 @@ import ( // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(fd) + path, err := syscall.Fd2path(int(fd)) if err != nil { return false } diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index bdd5c79a0..0c3acf2dc 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -1,5 +1,5 @@ -// +build solaris -// +build !appengine +//go:build solaris && !appengine +// +build solaris,!appengine package isatty @@ -8,10 +8,9 @@ import ( ) // IsTerminal returns true if the given file descriptor is a terminal. -// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) return err == nil } diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 453b025d0..67787657f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,6 @@ -// +build linux aix +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos // +build !appengine -// +build !android package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index 1fa869154..8e3c99171 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package isatty @@ -76,7 +76,7 @@ func isCygwinPipeName(name string) bool { } // getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion // guys are using Windows XP, this is a workaround for those guys, it will also work on system from // Windows vista to 10 // see https://stackoverflow.com/a/18792477 for details diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod index 2ae411b20..431d615d4 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/go.mod +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/go-wordwrap + +go 1.14 diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go index ac67205bc..f7bedda38 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -5,6 +5,8 @@ import ( "unicode" ) +const nbsp = 0xA0 + // WrapString wraps the given string within lim width in characters. // // Wrapping is currently naive and only happens at white-space. 
A future @@ -18,50 +20,58 @@ func WrapString(s string, lim uint) string { var current uint var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint for _, char := range s { if char == '\n' { if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) > lim { + if current+spaceBufLen > lim { current = 0 } else { - current += uint(spaceBuf.Len()) + current += spaceBufLen spaceBuf.WriteTo(buf) } spaceBuf.Reset() + spaceBufLen = 0 } else { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } buf.WriteRune(char) current = 0 - } else if unicode.IsSpace(char) { + } else if unicode.IsSpace(char) && char != nbsp { if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } spaceBuf.WriteRune(char) + spaceBufLen++ } else { - wordBuf.WriteRune(char) + wordBufLen++ - if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { buf.WriteRune('\n') current = 0 spaceBuf.Reset() + spaceBufLen = 0 } } } if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) <= lim { + if current+spaceBufLen <= lim { spaceBuf.WriteTo(buf) } } else { diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 1689c7d73..000000000 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - "1.11.x" - - tip - -script: - - go test diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 3b3cb723f..c75823490 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,78 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. 
[GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + ## 1.1.2 * Fix error when decode hook decodes interface implementation into interface diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 1f0abc65a..3a754ca72 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -1,6 +1,7 @@ package mapstructure import ( + "encoding" "errors" "fmt" "net" @@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // Create variables here so we can reference them with the reflect pkg var f1 DecodeHookFuncType var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue // Fill in the variables into this interface and the rest is done // automatically using the reflect package. - potential := []interface{}{f1, f2} + potential := []interface{}{f1, f2, f3} v := reflect.ValueOf(h) vt := v.Type() @@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // that took reflect.Kind instead of reflect.Type. func DecodeHookExec( raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { + from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { case DecodeHookFuncType: - return f(from, to, data) + return f(from.Type(), to.Type(), from.Interface()) case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) default: return nil, errors.New("invalid decode hook signature") } @@ -56,25 +60,42 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. 
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error + data := f.Interface() + + newFrom := f for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) + data, err = DecodeHookExec(f1, newFrom, t) if err != nil { return nil, err } + newFrom = reflect.ValueOf(data) + } - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue } + + return out, nil } - return data, nil + return nil, errors.New(allErrs) } } @@ -215,3 +236,44 @@ func WeaklyTypedHook( return data, nil } + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod index d2a712562..a03ae9730 100644 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/mapstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 256ee63fb..1efb22ac3 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,10 +1,161 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested // maps and so on into the proper structures in the native Go struct. 
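A short, hedged sketch of how composed decode hooks are wired up through DecoderConfig, using two long-standing helpers from the same package (StringToTimeDurationHookFunc, StringToSliceHookFunc); the struct, field names, and input values are invented for illustration.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Name    string        `mapstructure:"name"`
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]interface{}{
		"name":    "worker",
		"timeout": "1500ms",
		"tags":    "a,b,c",
	}

	var out Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		// Hooks run in order; each one sees the output of the previous hook.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Name:worker Timeout:1.5s Tags:[a b c]}
}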
// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. 
+// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. package mapstructure import ( @@ -21,10 +172,11 @@ import ( // data transformations. See "DecodeHook" in the DecoderConfig // struct. // -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. // // The reason DecodeHookFunc is multi-typed is for backwards compatibility: // we started with Kinds and then realized Types were the better solution, @@ -40,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { // DecodeHook, if set, will be called before any decoding and any // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. // - // If an error is returned, the entire decode will fail with that - // error. + // If an error is returned, the entire decode will fail with that error. DecodeHook DecodeHookFunc // If ErrorUnused is true, then it is an error for there to exist @@ -56,6 +215,12 @@ type DecoderConfig struct { // (extra keys). ErrorUnused bool + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + // ZeroFields, if set to true, will zero fields before writing them. 
// For example, a map will be emptied before decoded values are put in // it. If this is false, a map will be merged. @@ -80,6 +245,14 @@ type DecoderConfig struct { // WeaklyTypedInput bool + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -91,6 +264,15 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -112,6 +294,11 @@ type Metadata struct { // Unused is a slice of keys that were found in the raw value but // weren't decoded since there was no matching field in the result interface Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string } // Decode takes an input structure and uses reflection to translate it to @@ -203,12 +390,20 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { if config.Metadata.Unused == nil { config.Metadata.Unused = make([]string, 0) } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } } if config.TagName == "" { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -261,9 +456,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e if d.config.DecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %s", name, err) } @@ -271,6 +464,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error outputKind := getKind(outVal) + addMetaKey := true switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) @@ -289,7 +483,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e case reflect.Map: err = d.decodeMap(name, input, outVal) case reflect.Ptr: - err = d.decodePtr(name, input, outVal) + addMetaKey, err = d.decodePtr(name, input, outVal) case reflect.Slice: err = d.decodeSlice(name, input, outVal) case reflect.Array: @@ -303,7 +497,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // If we reached here, then we successfully decoded SOMETHING, so // mark the key as used if we're tracking metainput. 
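A hedged sketch of the metadata tracking referenced above: Keys records what was decoded and Unused records input keys with no matching field. The target type and input are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Target struct {
	Name string `mapstructure:"name"`
	Port int    `mapstructure:"port"`
}

func main() {
	var out Target
	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:   &out,
		Metadata: &md,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]interface{}{"name": "db", "extra": true}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.Keys)   // keys that were decoded, e.g. [Name]
	fmt.Println(md.Unused) // input keys with no matching field: [extra]
}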
- if d.config.Metadata != nil && name != "" { + if addMetaKey && d.config.Metadata != nil && name != "" { d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } @@ -314,7 +508,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil } dataVal := reflect.ValueOf(data) @@ -386,8 +607,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) if !converted { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -412,7 +633,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) if err == nil { val.SetInt(i) } else { @@ -428,8 +654,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -438,6 +664,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -463,16 +690,29 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e val.SetUint(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) if err == nil { val.SetUint(i) } else { return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, 
val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -502,8 +742,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -528,7 +768,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) if err == nil { val.SetFloat(f) } else { @@ -544,8 +789,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -596,7 +841,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref for i := 0; i < dataVal.Len(); i++ { err := d.decode( - fmt.Sprintf("%s[%d]", name, i), + name+"["+strconv.Itoa(i)+"]", dataVal.Index(i).Interface(), val) if err != nil { return err @@ -629,7 +874,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle } for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) + fieldName := name + "[" + k.String() + "]" // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) @@ -678,27 +923,48 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { continue } - keyName = tagParts[0] - } - // If "squash" is specified in the tag, we squash the field down. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. 
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } } - } - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue } switch v.Kind() { @@ -713,11 +979,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -738,7 +1015,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return nil } -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { // If the input data is nil, then we want to just set the output // pointer to be nil as well. isNil := data == nil @@ -759,7 +1036,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er val.Set(nilValue) } - return nil + return true, nil } // Create an element of the concrete (non pointer) type and decode @@ -773,16 +1050,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er } if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err + return false, err } val.Set(realVal) } else { if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err + return false, err } } - return nil + return false, nil } func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { @@ -791,8 +1068,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e dataVal := reflect.Indirect(reflect.ValueOf(data)) if val.Type() != dataVal.Type() { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } val.Set(dataVal) return nil @@ -805,8 +1082,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { + // If we have a non array/slice type then we first attempt to convert. 
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice { if d.config.WeaklyTypedInput { switch { // Slice and array we use the normal logic @@ -833,18 +1110,17 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } } - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } - // If the input value is empty, then don't allocate since non-nil != nil - if dataVal.Len() == 0 { - return nil - } + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) } @@ -859,7 +1135,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } currentField := valSlice.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -926,7 +1202,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) currentData := dataVal.Index(i).Interface() currentField := valArray.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -962,13 +1238,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: @@ -991,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e dataValKeysUnused[dataValKey.Interface()] = struct{}{} } + targetValKeysUnused := make(map[interface{}]struct{}) errors := make([]string, 0) // This slice will keep track of all the structs we'll be decoding. @@ -1005,6 +1292,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field reflect.StructField val reflect.Value } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. 
+ var remainField *field + fields := []field{} for len(structs) > 0 { structVal := structs[0] @@ -1014,30 +1306,47 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } // If "squash" is specified in the tag, we squash the field down. - squash := false + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } + + if tag == "remain" { + remain = true + break + } } if squash { - if fieldKind != reflect.Struct { + if fieldVal.Kind() != reflect.Struct { errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) + structs = append(structs, fieldVal) } continue } - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } } } @@ -1064,7 +1373,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break @@ -1073,14 +1382,12 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !rawMapVal.IsValid() { // There was no matching key in the map for the value in - // the struct. Just ignore. + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} continue } } - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - if !fieldValue.IsValid() { // This should never happen panic("field is not valid") @@ -1092,10 +1399,13 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + // If the name is empty string, then we're at the root, and we // don't dot-join the fields. if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) + fieldName = name + "." + fieldName } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { @@ -1103,6 +1413,25 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } } + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. 
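// Illustrative sketch (not part of the upstream change): the caller-side effect of
// the "remain" tag and the new Metadata.Unset tracking implemented in
// decodeStructFromMap below. The Config type and the input map are hypothetical.
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Name  string                 `mapstructure:"name"`
	Port  int                    `mapstructure:"port"`
	Extra map[string]interface{} `mapstructure:",remain"` // receives every unmatched input key
}

func main() {
	var (
		cfg Config
		md  mapstructure.Metadata
	)
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:   &cfg,
		Metadata: &md,
	})
	if err != nil {
		panic(err)
	}

	input := map[string]interface{}{
		"name":  "api",
		"debug": true, // no matching field, so it lands in Extra
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}

	fmt.Println(cfg.Extra["debug"]) // true
	fmt.Println(md.Unset)           // [port]: declared on Config but absent from the input
}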
+ if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. + if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) for rawKey := range dataValKeysUnused { @@ -1114,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e errors = appendErrors(errors, err) } + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + if len(errors) > 0 { return &Error{errors} } @@ -1123,16 +1463,42 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { - key = fmt.Sprintf("%s.%s", name, key) + key = name + "." + key } d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } } return nil } +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func getKind(val reflect.Value) reflect.Kind { kind := val.Kind() @@ -1147,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind { return kind } } + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml deleted file mode 100644 index 362bdd41c..000000000 --- a/vendor/github.com/oklog/run/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -sudo: false -go: - - 1.x - - tip -install: - - go get -v github.com/golang/lint/golint - - go build ./... -script: - - go vet ./... 
- - $HOME/gopath/bin/golint . - - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md index a7228cd9a..eba7d11cf 100644 --- a/vendor/github.com/oklog/run/README.md +++ b/vendor/github.com/oklog/run/README.md @@ -1,7 +1,7 @@ # run [![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) [![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) @@ -10,9 +10,11 @@ run.Group is a universal mechanism to manage goroutine lifecycles. Create a zero-value run.Group, and then add actors to it. Actors are defined as a pair of functions: an **execute** function, which should run synchronously; and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which blocks until the first actor -returns. This general-purpose API allows callers to model pretty much any -runnable task, and achieve well-defined lifecycle semantics for the group. +function to return. Finally, invoke Run, which concurrently runs all of the +actors, waits until the first actor exits, invokes the interrupt functions, and +finally returns control to the caller only once all actors have returned. This +general-purpose API allows callers to model pretty much any runnable task, and +achieve well-defined lifecycle semantics for the group. run.Group was written to manage component lifecycles in func main for [OK Log](https://github.com/oklog/oklog). diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go new file mode 100644 index 000000000..ef93495d3 --- /dev/null +++ b/vendor/github.com/oklog/run/actors.go @@ -0,0 +1,38 @@ +package run + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +// SignalHandler returns an actor, i.e. an execute and interrupt func, that +// terminates with SignalError when the process receives one of the provided +// signals, or the parent context is canceled. +func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, signals...) + select { + case sig := <-c: + return SignalError{Signal: sig} + case <-ctx.Done(): + return ctx.Err() + } + }, func(error) { + cancel() + } +} + +// SignalError is returned by the signal handler's execute function +// when it terminates due to a received signal. +type SignalError struct { + Signal os.Signal +} + +// Error implements the error interface. 
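// Illustrative sketch (not part of the upstream change): wiring the SignalHandler
// actor added in actors.go above into a run.Group alongside a hypothetical HTTP
// server. Group.Add and Group.Run are the existing oklog/run API described in the
// README diff.
package main

import (
	"context"
	"log"
	"net/http"
	"os"

	"github.com/oklog/run"
)

func main() {
	var g run.Group

	// Actor 1: an HTTP server; its interrupt function shuts the server down.
	srv := &http.Server{Addr: ":8080"}
	g.Add(func() error {
		return srv.ListenAndServe()
	}, func(error) {
		srv.Close()
	})

	// Actor 2: terminates the whole group on SIGINT or when the context is canceled.
	g.Add(run.SignalHandler(context.Background(), os.Interrupt))

	// Run starts every actor, waits for the first one to exit, invokes the
	// interrupt functions, and returns once all actors have returned.
	if err := g.Run(); err != nil {
		log.Println(err)
	}
}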
+func (e SignalError) Error() string { + return fmt.Sprintf("received signal %s", e.Signal) +} diff --git a/vendor/github.com/oklog/run/go.mod b/vendor/github.com/oklog/run/go.mod new file mode 100644 index 000000000..b1297232e --- /dev/null +++ b/vendor/github.com/oklog/run/go.mod @@ -0,0 +1,3 @@ +module github.com/oklog/run + +go 1.13 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml new file mode 100644 index 000000000..98d6cb779 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml @@ -0,0 +1,12 @@ +run: + concurrency: 8 + deadline: 5m + tests: false +linters: + enable-all: true + disable: + - gochecknoglobals + - gocognit + - godox + - wsl + - funlen diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml new file mode 100644 index 000000000..b35bf5484 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go + +go: + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/msgpack + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md new file mode 100644 index 000000000..fac97090e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md @@ -0,0 +1,24 @@ +## v4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and DecodeMulti are added as replacement. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/LICENSE b/vendor/github.com/vmihailenco/msgpack/v4/LICENSE new file mode 100644 index 000000000..b749d0707 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/Makefile b/vendor/github.com/vmihailenco/msgpack/v4/Makefile new file mode 100644 index 000000000..57914e333 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/Makefile @@ -0,0 +1,6 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + golangci-lint run diff --git a/vendor/github.com/vmihailenco/msgpack/v4/README.md b/vendor/github.com/vmihailenco/msgpack/v4/README.md new file mode 100644 index 000000000..a5b1004e0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/README.md @@ -0,0 +1,72 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseArrayForStructs) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). +- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. 
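A short sketch of the query API (an illustration, not part of the upstream README): `Decoder.Query`, added in decode_query.go later in this patch, extracts nested values without decoding the whole document. The payload below is hypothetical.

``` go
import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

func ExampleDecoder_Query() {
	b, err := msgpack.Marshal(map[string]interface{}{
		"posts": []map[string]interface{}{
			{"title": "first"},
			{"title": "second"},
		},
	})
	if err != nil {
		panic(err)
	}

	// "posts.*.title" follows the map key "posts", every array element, then "title".
	values, err := msgpack.NewDecoder(bytes.NewReader(b)).Query("posts.*.title")
	if err != nil {
		panic(err)
	}
	fmt.Println(values)
	// Output: [first second]
}
```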
+ +## Installation + +This project uses [Go Modules](https://github.com/golang/go/wiki/Modules) and semantic import versioning since v4: + +``` shell +go mod init github.com/my/repo +go get github.com/vmihailenco/msgpack/v4 +``` + +## Quickstart + +``` go +import "github.com/vmihailenco/msgpack/v4" + +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/vmihailenco/msgpack/v4/appengine.go b/vendor/github.com/vmihailenco/msgpack/v4/appengine.go new file mode 100644 index 000000000..e8e91e53f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go new file mode 100644 index 000000000..28e0a5a88 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go @@ -0,0 +1,90 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code = 0xe0 + + Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 
0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code = 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c Code) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode.go b/vendor/github.com/vmihailenco/msgpack/v4/decode.go new file mode 100644 index 000000000..1711675e9 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode.go @@ -0,0 +1,617 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +const ( + looseIfaceFlag uint32 = 1 << iota + decodeUsingJSONFlag + disallowUnknownFieldsFlag +) + +const ( + bytesAllocLimit = 1e6 // 1mb + sliceAllocLimit = 1e4 + maxMapSize = 1e6 +) + +type bufReader interface { + io.Reader + io.ByteScanner +} + +//------------------------------------------------------------------------------ + +var decPool = sync.Pool{ + New: func() interface{} { + return NewDecoder(nil) + }, +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + dec := decPool.Get().(*Decoder) + + if r, ok := dec.r.(*bytes.Reader); ok { + r.Reset(data) + } else { + dec.Reset(bytes.NewReader(data)) + } + err := dec.Decode(v) + + decPool.Put(dec) + + return err +} + +// A Decoder reads and decodes MessagePack values from an input stream. +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + intern []string + flags uint32 + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + d.Reset(r) + return d +} + +// Reset discards any buffered data, resets all state, and switches the buffered +// reader to read from r. 
+func (d *Decoder) Reset(r io.Reader) { + if br, ok := r.(bufReader); ok { + d.r = br + d.s = br + } else if br, ok := d.r.(*bufio.Reader); ok { + br.Reset(r) + } else { + br := bufio.NewReader(r) + d.r = br + d.s = br + } + + if d.intern != nil { + d.intern = d.intern[:0] + } + + //TODO: + //d.useLoose = false + //d.useJSONTag = false + //d.disallowUnknownFields = false + //d.decodeMapFunc = nil +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseDecodeInterfaceLoose(on bool) *Decoder { + if on { + d.flags |= looseIfaceFlag + } else { + d.flags &= ^looseIfaceFlag + } + return d +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. +func (d *Decoder) UseJSONTag(on bool) *Decoder { + if on { + d.flags |= decodeUsingJSONFlag + } else { + d.flags &= ^decodeUsingJSONFlag + } + return d +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + if true { + d.flags |= disallowUnknownFieldsFlag + } else { + d.flags &= ^disallowUnknownFieldsFlag + } +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. +// The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.r +} + +//nolint:gocyclo +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return 
d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.flags&looseIfaceFlag != 0 { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +func (d *Decoder) DecodeDuration() (time.Duration, error) { + n, err := d.DecodeInt64() + if err != nil { + return 0, err + } + return time.Duration(n), nil +} + +// DecodeInterface decodes value into interface. It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. 
+func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } + if codes.IsFixedMap(c) { + return d.skipMap(c) + } + if codes.IsFixedArray(c) { + return d.skipSlice(c) + } + if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + //TODO: read directly into d.rec? + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + var err error + d.buf, err = readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + if d.rec != nil { + //TODO: read directly into d.rec? + d.rec = append(d.rec, d.buf...) + } + return d.buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := min(n-len(b), bytesAllocLimit) + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return b, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go new file mode 100644 index 000000000..16c40fe77 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go @@ -0,0 +1,350 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var ( + mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) + mapStringStringType = mapStringStringPtrType.Elem() +) + +var ( + mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) + mapStringInterfaceType = mapStringInterfacePtrType.Elem() +) + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +var errInvalidCode = errors.New("invalid code") + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, maxMapSize)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, maxMapSize)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +var errUnsupportedMapKey = errors.New("msgpack: unsupported map key") + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) || codes.IsBin(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + if !keyType.Comparable() { + return nil, errUnsupportedMapKey + } + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := make(map[string]interface{}, min(size, maxMapSize)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.flags&decodeUsingJSONFlag != 0 { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + + // Skip extra values. 
+ for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + + if f := fields.Map[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else if d.flags&disallowUnknownFieldsFlag != 0 { + return fmt.Errorf("msgpack: unknown field %q", name) + } else if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go new file mode 100644 index 000000000..f6b9151f0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
+func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. +func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v 
reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go new file mode 100644 index 000000000..80cd80e78 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go new file mode 100644 index 000000000..adf17ae5c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go @@ -0,0 +1,191 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. +func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := makeStrings(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func makeStrings(s []string, n int) []string { + if n > sliceAllocLimit { + n = sliceAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceAllocLimit { + diff = sliceAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go 
b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go new file mode 100644 index 000000000..c5adf3865 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go @@ -0,0 +1,186 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + + if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + return d.stringWithLen(n) +} + +func (d *Decoder) stringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + s, err := d.DecodeString() + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n <= 0 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go new file 
mode 100644 index 000000000..810d3be8f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go @@ -0,0 +1,276 @@ +package msgpack + +import ( + "encoding" + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + if typ.Implements(binaryUnmarshalerType) { + return unmarshalBinaryValue + } + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + if ptr.Implements(binaryUnmarshalerType) { + return unmarshalBinaryValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.extLen == 0 || d.extLen == 1 { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + var b []byte + + if d.extLen != 0 { + var err error + b, err = d.readN(d.extLen) + if err != nil { + return err + } + } else { + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + b = d.rec + d.rec = nil + } + + unmarshaler := v.Interface().(Unmarshaler) + return unmarshaler.UnmarshalMsgpack(b) +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} + 
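The getDecoder/_getDecoder dispatch above routes any type implementing the package's CustomDecoder interface (and, on the encode side, CustomEncoder — both defined in msgpack.go later in this diff) through decodeCustomValue/encodeCustomValue instead of the per-kind tables. A minimal hedged sketch of how a caller plugs into that path follows; it is not part of the vendored files. The Point type and its methods are invented for illustration; msgpack.Marshal appears in this diff's encode.go, while msgpack.Unmarshal is assumed from the package's decode.go, which is not shown in this hunk.

package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

// Point is a hypothetical type used only for this sketch.
type Point struct{ X, Y int64 }

// EncodeMsgpack writes the coordinates as two plain msgpack integers,
// so *Point is handled by the encodeCustomValue path.
func (p *Point) EncodeMsgpack(enc *msgpack.Encoder) error {
	if err := enc.EncodeInt64(p.X); err != nil {
		return err
	}
	return enc.EncodeInt64(p.Y)
}

// DecodeMsgpack reads the coordinates back in the same order,
// exercising decodeCustomValue / decodeCustomValueAddr above.
func (p *Point) DecodeMsgpack(dec *msgpack.Decoder) error {
	var err error
	if p.X, err = dec.DecodeInt64(); err != nil {
		return err
	}
	p.Y, err = dec.DecodeInt64()
	return err
}

func main() {
	b, err := msgpack.Marshal(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	var out Point
	if err := msgpack.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // {1 2}
}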
+//------------------------------------------------------------------------------ + +func unmarshalBinaryValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalBinaryValue(d, v.Addr()) +} + +func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) + return unmarshaler.UnmarshalBinary(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode.go b/vendor/github.com/vmihailenco/msgpack/v4/encode.go new file mode 100644 index 000000000..37f098701 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode.go @@ -0,0 +1,241 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +const ( + sortMapKeysFlag uint32 = 1 << iota + structAsArrayFlag + encodeUsingJSONFlag + useCompactIntsFlag + useCompactFloatsFlag +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +type byteWriter struct { + io.Writer + + buf [1]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := new(byteWriter) + bw.Reset(w) + return bw +} + +func (bw *byteWriter) Reset(w io.Writer) { + bw.Writer = w +} + +func (bw *byteWriter) WriteByte(c byte) error { + bw.buf[0] = c + _, err := bw.Write(bw.buf[:]) + return err +} + +//------------------------------------------------------------------------------ + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + enc := encPool.Get().(*Encoder) + + var buf bytes.Buffer + enc.Reset(&buf) + + err := enc.Encode(v) + b := buf.Bytes() + + encPool.Put(enc) + + if err != nil { + return nil, err + } + return b, err +} + +type Encoder struct { + w writer + + buf []byte + timeBuf []byte + bootstrap [9 + 12]byte + + intern map[string]int + + flags uint32 +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := new(Encoder) + e.buf = e.bootstrap[:9] + e.timeBuf = e.bootstrap[9 : 9+12] + e.Reset(w) + return e +} + +func (e *Encoder) Reset(w io.Writer) { + if bw, ok := w.(writer); ok { + e.w = bw + } else if bw, ok := e.w.(*byteWriter); ok { + bw.Reset(w) + } else { + e.w = newByteWriter(w) + } + + for k := range e.intern { + delete(e.intern, k) + } + + //TODO: + //e.sortMapKeys = false + //e.structAsArray = false + //e.useJSONTag = false + //e.useCompact = false +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(on bool) *Encoder { + if on { + e.flags |= sortMapKeysFlag + } else { + e.flags &= ^sortMapKeysFlag + } + return e +} + +// StructAsArray causes the Encoder to encode Go structs as msgpack arrays. +func (e *Encoder) StructAsArray(on bool) *Encoder { + if on { + e.flags |= structAsArrayFlag + } else { + e.flags &= ^structAsArrayFlag + } + return e +} + +// UseJSONTag causes the Encoder to use json struct tag as fallback option +// if there is no msgpack tag. 
+func (e *Encoder) UseJSONTag(on bool) *Encoder { + if on { + e.flags |= encodeUsingJSONFlag + } else { + e.flags &= ^encodeUsingJSONFlag + } + return e +} + +// UseCompactEncoding causes the Encoder to chose the most compact encoding. +// For example, it allows to encode small Go int64 as msgpack int8 saving 7 bytes. +func (e *Encoder) UseCompactEncoding(on bool) *Encoder { + if on { + e.flags |= useCompactIntsFlag + } else { + e.flags &= ^useCompactIntsFlag + } + return e +} + +// UseCompactFloats causes the Encoder to chose a compact integer encoding +// for floats that can be represented as integers. +func (e *Encoder) UseCompactFloats(on bool) { + if on { + e.flags |= useCompactFloatsFlag + } else { + e.flags &= ^useCompactFloatsFlag + } +} + +func (e *Encoder) Encode(v interface{}) error { + switch v := v.(type) { + case nil: + return e.EncodeNil() + case string: + return e.EncodeString(v) + case []byte: + return e.EncodeBytes(v) + case int: + return e.encodeInt64Cond(int64(v)) + case int64: + return e.encodeInt64Cond(v) + case uint: + return e.encodeUint64Cond(uint64(v)) + case uint64: + return e.encodeUint64Cond(v) + case bool: + return e.EncodeBool(v) + case float32: + return e.EncodeFloat32(v) + case float64: + return e.EncodeFloat64(v) + case time.Duration: + return e.encodeInt64Cond(int64(v)) + case time.Time: + return e.EncodeTime(v) + } + return e.EncodeValue(reflect.ValueOf(v)) +} + +func (e *Encoder) EncodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := e.Encode(vv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeValue(v reflect.Value) error { + fn := getEncoder(v.Type()) + return fn(e, v) +} + +func (e *Encoder) EncodeNil() error { + return e.writeCode(codes.Nil) +} + +func (e *Encoder) EncodeBool(value bool) error { + if value { + return e.writeCode(codes.True) + } + return e.writeCode(codes.False) +} + +func (e *Encoder) EncodeDuration(d time.Duration) error { + return e.EncodeInt(int64(d)) +} + +func (e *Encoder) writeCode(c codes.Code) error { + return e.w.WriteByte(byte(c)) +} + +func (e *Encoder) write(b []byte) error { + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) writeString(s string) error { + _, err := e.w.Write(stringToBytes(s)) + return err +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go new file mode 100644 index 000000000..d9b954d86 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go @@ -0,0 +1,172 @@ +package msgpack + +import ( + "reflect" + "sort" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +func encodeMapValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + for _, key := range v.MapKeys() { + if err := e.EncodeValue(key); err != nil { + return err + } + if err := e.EncodeValue(v.MapIndex(key)); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func 
encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringInterface(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedMapLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Map16, uint16(l)) + } + return e.write4(codes.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + var structFields *fields + if e.flags&encodeUsingJSONFlag != 0 { + structFields = jsonStructs.Fields(strct.Type()) + } else { + structFields = structs.Fields(strct.Type()) + } + + if e.flags&structAsArrayFlag != 0 || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go new file mode 100644 index 000000000..bf3c2f851 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go @@ -0,0 +1,244 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. +func (e *Encoder) EncodeUint8(n uint8) error { + return e.write1(codes.Uint8, n) +} + +func (e *Encoder) encodeUint8Cond(n uint8) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint8(n) +} + +// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeUint16(n uint16) error { + return e.write2(codes.Uint16, n) +} + +func (e *Encoder) encodeUint16Cond(n uint16) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint16(n) +} + +// EncodeUint32 encodes an uint16 in 5 bytes preserving type of the number. 
+func (e *Encoder) EncodeUint32(n uint32) error { + return e.write4(codes.Uint32, n) +} + +func (e *Encoder) encodeUint32Cond(n uint32) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint32(n) +} + +// EncodeUint64 encodes an uint16 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeUint64(n uint64) error { + return e.write8(codes.Uint64, n) +} + +func (e *Encoder) encodeUint64Cond(n uint64) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(n) + } + return e.EncodeUint64(n) +} + +// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number. +func (e *Encoder) EncodeInt8(n int8) error { + return e.write1(codes.Int8, uint8(n)) +} + +func (e *Encoder) encodeInt8Cond(n int8) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt8(n) +} + +// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeInt16(n int16) error { + return e.write2(codes.Int16, uint16(n)) +} + +func (e *Encoder) encodeInt16Cond(n int16) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt16(n) +} + +// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number. +func (e *Encoder) EncodeInt32(n int32) error { + return e.write4(codes.Int32, uint32(n)) +} + +func (e *Encoder) encodeInt32Cond(n int32) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt32(n) +} + +// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeInt64(n int64) error { + return e.write8(codes.Int64, uint64(n)) +} + +func (e *Encoder) encodeInt64Cond(n int64) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(n) + } + return e.EncodeInt64(n) +} + +// EncodeUnsignedNumber encodes an uint64 in 1, 2, 3, 5, or 9 bytes. +// Type of the number is lost during encoding. +func (e *Encoder) EncodeUint(n uint64) error { + if n <= math.MaxInt8 { + return e.w.WriteByte(byte(n)) + } + if n <= math.MaxUint8 { + return e.EncodeUint8(uint8(n)) + } + if n <= math.MaxUint16 { + return e.EncodeUint16(uint16(n)) + } + if n <= math.MaxUint32 { + return e.EncodeUint32(uint32(n)) + } + return e.EncodeUint64(n) +} + +// EncodeNumber encodes an int64 in 1, 2, 3, 5, or 9 bytes. +// Type of the number is lost during encoding. +func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(codes.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(n) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + if e.flags&useCompactFloatsFlag != 0 { + if float32(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write4(codes.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + if e.flags&useCompactFloatsFlag != 0 { + // Both NaN and Inf convert to int64(-0x8000000000000000) + // If n is NaN then it never compares true with any other value + // If n is Inf then it doesn't convert from int64 back to +/-Inf + // In both cases the comparison works. 
+ if float64(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = n + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go new file mode 100644 index 000000000..69a9618e0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go @@ -0,0 +1,131 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var sliceStringType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(sliceStringType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go new file mode 100644 index 000000000..335fcdb7e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go @@ -0,0 +1,216 @@ +package msgpack + +import ( + "encoding" + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := _getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + 
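getEncoder above caches one encoderFunc per reflect.Type, and the helpers in encode_slice.go just before it emit msgpack's compact length prefixes (fixarray 0x90|len and fixstr 0xa0|len for small values). A small hedged sketch, not part of the vendored files, showing those prefixes on the wire via Marshal from this package:

package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

func main() {
	// A one-element []string goes through encodeStringSliceValue:
	// EncodeArrayLen(1) writes 0x91, encodeStrLen(2) writes 0xa2,
	// then the raw bytes of "hi" follow.
	b, err := msgpack.Marshal([]string{"hi"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 91 a2 68 69
}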
+func _getEncoder(typ reflect.Type) encoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeEncMap.Load(typ.Elem()); ok { + return ptrEncoderFunc(typ) + } + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + if typ.Implements(binaryMarshalerType) { + return marshalBinaryValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + if ptr.Implements(binaryMarshalerType) { + return marshalBinaryValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} + +func nilable(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +//------------------------------------------------------------------------------ + +func marshalBinaryValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalBinaryValue(e, v.Addr()) +} + +func marshalBinaryValue(e 
*Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.BinaryMarshaler) + data, err := marshaler.MarshalBinary() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/ext.go b/vendor/github.com/vmihailenco/msgpack/v4/ext.go new file mode 100644 index 000000000..17e709bc8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/ext.go @@ -0,0 +1,244 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +type extInfo struct { + Type reflect.Type + Decoder decoderFunc +} + +var extTypes = make(map[int8]*extInfo) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. +// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if enc != nil { + typeEncMap.Store(typ, makeExtEncoder(id, enc)) + } + if dec != nil { + extTypes[id] = &extInfo{ + Type: typ, + Decoder: dec, + } + typeDecMap.Store(typ, makeExtDecoder(id, dec)) + } +} + +func (e *Encoder) EncodeExtHeader(typeID int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeID)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeID int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeID, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeID int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if id != typeID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeID) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + 
switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) extHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeID, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(typeID), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeID int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.extHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extID, extLen, err := d.extHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := reflect.New(info.Type) + + d.extLen = extLen + err = info.Decoder(d, v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.mod b/vendor/github.com/vmihailenco/msgpack/v4/go.mod new file mode 100644 index 000000000..d935fa654 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/go.mod @@ -0,0 +1,13 @@ +module github.com/vmihailenco/msgpack/v4 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/golang/protobuf v1.3.4 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/vmihailenco/tagparser v0.1.1 + golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect + google.golang.org/appengine v1.6.5 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +) + +go 1.11 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.sum b/vendor/github.com/vmihailenco/msgpack/v4/go.sum new file mode 100644 index 000000000..be608ec08 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/go.sum @@ -0,0 +1,24 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/vmihailenco/msgpack/v4/intern.go b/vendor/github.com/vmihailenco/msgpack/v4/intern.go new file mode 100644 index 000000000..6ca569273 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/intern.go @@ -0,0 +1,236 @@ +package msgpack + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var internStringExtID int8 = -128 + +var errUnexpectedCode = errors.New("msgpack: unexpected code") + +func encodeInternInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + v = v.Elem() + if v.Kind() == reflect.String { + return encodeInternStringValue(e, v) + } + return e.EncodeValue(v) +} + +func encodeInternStringValue(e *Encoder, v reflect.Value) error { + s := v.String() + + if s != "" { + if idx, ok := e.intern[s]; ok { + return e.internStringIndex(idx) + } + + if e.intern == nil { + e.intern = make(map[string]int) + } + + idx := len(e.intern) + e.intern[s] = idx + } + + return e.EncodeString(s) +} + +func (e *Encoder) internStringIndex(idx int) error { + if idx < math.MaxUint8 { + if err := e.writeCode(codes.FixExt1); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + return e.w.WriteByte(byte(idx)) + } + + if idx < math.MaxUint16 { + if err := e.writeCode(codes.FixExt2); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 8)); err != nil { + return err + } + return e.w.WriteByte(byte(idx)) + } + + if int64(idx) < math.MaxUint32 { + if err := e.writeCode(codes.FixExt4); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 24)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 16)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 8)); err != nil { + return err + } + return 
e.w.WriteByte(byte(idx)) + } + + return fmt.Errorf("msgpack: intern string index=%d is too large", idx) +} + +//------------------------------------------------------------------------------ + +func decodeInternInterfaceValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + s, err := d.internString(c) + if err == nil { + v.Set(reflect.ValueOf(s)) + return nil + } + if err != nil && err != errUnexpectedCode { + return err + } + + if err := d.s.UnreadByte(); err != nil { + return err + } + + return decodeInterfaceValue(d, v) +} + +func decodeInternStringValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + c, err := d.readCode() + if err != nil { + return err + } + + s, err := d.internString(c) + if err != nil { + if err == errUnexpectedCode { + return fmt.Errorf("msgpack: invalid code=%x decoding intern string", c) + } + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) internString(c codes.Code) (string, error) { + if codes.IsFixedString(c) { + n := int(c & codes.FixedStrMask) + return d.internStringWithLen(n) + } + + switch c { + case codes.FixExt1, codes.FixExt2, codes.FixExt4: + typeID, length, err := d.extHeader(c) + if err != nil { + return "", err + } + if typeID != internStringExtID { + err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", + typeID, internStringExtID) + return "", err + } + + idx, err := d.internStringIndex(length) + if err != nil { + return "", err + } + + return d.internStringAtIndex(idx) + case codes.Str8, codes.Bin8: + n, err := d.uint8() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + case codes.Str16, codes.Bin16: + n, err := d.uint16() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + case codes.Str32, codes.Bin32: + n, err := d.uint32() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + } + + return "", errUnexpectedCode +} + +func (d *Decoder) internStringIndex(length int) (int, error) { + switch length { + case 1: + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return int(c), nil + case 2: + b, err := d.readN(2) + if err != nil { + return 0, err + } + n := binary.BigEndian.Uint16(b) + return int(n), nil + case 4: + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := binary.BigEndian.Uint32(b) + return int(n), nil + } + + err := fmt.Errorf("msgpack: unsupported intern string index length=%d", length) + return 0, err +} + +func (d *Decoder) internStringAtIndex(idx int) (string, error) { + if idx >= len(d.intern) { + err := fmt.Errorf("msgpack: intern string with index=%d does not exist", idx) + return "", err + } + return d.intern[idx], nil +} + +func (d *Decoder) internStringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + + s, err := d.stringWithLen(n) + if err != nil { + return "", err + } + + d.intern = append(d.intern, s) + + return s, nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go new file mode 100644 index 000000000..220b43c47 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/v4/safe.go b/vendor/github.com/vmihailenco/msgpack/v4/safe.go new file mode 100644 index 000000000..8352c9dce --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/safe.go @@ -0,0 +1,13 @@ +// +build appengine + +package msgpack + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return string(b) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/time.go b/vendor/github.com/vmihailenco/msgpack/v4/time.go new file mode 100644 index 000000000..bf53eb2a3 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/time.go @@ -0,0 +1,149 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var timeExtID int8 = -1 + +var timePtrType = reflect.TypeOf((*time.Time)(nil)) + +//nolint:gochecknoinits +func init() { + registerExt(timeExtID, timePtrType.Elem(), encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Assume that zero time does not have timezone information. + return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. 
+ _, err = d.s.ReadByte() + if err != nil { + return time.Time{}, nil + } + } + + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} + +func encodeTimeValue(e *Encoder, v reflect.Value) error { + tm := v.Interface().(time.Time) + b := e.encodeTime(tm) + return e.write(b) +} + +func decodeTimeValue(d *Decoder, v reflect.Value) error { + tm, err := d.DecodeTime() + if err != nil { + return err + } + + ptr := v.Addr().Interface().(*time.Time) + *ptr = tm + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/types.go b/vendor/github.com/vmihailenco/msgpack/v4/types.go new file mode 100644 index 000000000..08cf099dd --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/types.go @@ -0,0 +1,382 @@ +package msgpack + +import ( + "encoding" + "fmt" + "log" + "reflect" + "sync" + + "github.com/vmihailenco/tagparser" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var ( + customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() + customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() +) + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +var ( + binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() +) + +type ( + encoderFunc func(*Encoder, reflect.Value) error + decoderFunc func(*Decoder, reflect.Value) error +) + +var ( + typeEncMap sync.Map + typeDecMap sync.Map +) + +// Register registers encoder and decoder functions for a value. +// This is low level API and in most cases you should prefer implementing +// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces. 
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +var ( + structs = newStructCache(false) + jsonStructs = newStructCache(true) +) + +type structCache struct { + m sync.Map + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + if v, ok := m.m.Load(typ); ok { + return v.(*fields) + } + + fs := getFields(typ, m.useJSONTag) + m.m.Store(typ, fs) + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) Omit(strct reflect.Value) bool { + v, isNil := fieldByIndex(strct, f.index) + if isNil { + return true + } + return f.omitEmpty && isEmptyValue(v) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + v, isNil := fieldByIndex(strct, f.index) + if isNil { + return e.EncodeNil() + } + return f.encoder(e, v) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + v := fieldByIndexAlloc(strct, f.index) + return f.decoder(d, v) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Type reflect.Type + Map map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(typ reflect.Type) *fields { + return &fields{ + Type: typ, + Map: make(map[string]*field, typ.NumField()), + List: make([]*field, 0, typ.NumField()), + } +} + +func (fs *fields) Add(field *field) { + fs.warnIfFieldExists(field.name) + fs.Map[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) warnIfFieldExists(name string) { + if _, ok := fs.Map[name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, name) + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + fs := newFields(typ) + + var omitEmpty bool + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get("msgpack") + if useJSONTag && tagStr == "" { + tagStr = f.Tag.Get("json") + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + if tag.HasOption("asArray") { + fs.AsArray = true + } + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + } + + if tag.HasOption("intern") { + switch f.Type.Kind() { + case reflect.Interface: + field.encoder = encodeInternInterfaceValue + field.decoder = decodeInternInterfaceValue + case reflect.String: + field.encoder = encodeInternStringValue + field.decoder = decodeInternStringValue + default: + err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) + panic(err) + } + } else { + field.encoder = getEncoder(f.Type) + field.decoder = 
getDecoder(f.Type) + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = shouldInline(fs, f.Type, field, useJSONTag) + } + + if inline { + if _, ok := fs.Map[field.name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) + } + fs.Map[field.name] = field + continue + } + } + + fs.Add(field) + + if alias, ok := tag.Options["alias"]; ok { + fs.warnIfFieldExists(alias) + fs.Map[alias] = field + } + } + return fs +} + +var ( + encodeStructValuePtr uintptr + decodeStructValuePtr uintptr +) + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) 
+ fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, isNil bool) { + if len(index) == 1 { + return v.Field(index[0]), false + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, true + } + v = v.Elem() + } + } + v = v.Field(idx) + } + + return v, false +} + +func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(idx) + } + + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go new file mode 100644 index 000000000..50c0da8b5 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package msgpack + +import ( + "unsafe" +) + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { //nolint:deadcode,unused + return *(*string)(unsafe.Pointer(&b)) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/.travis.yml b/vendor/github.com/vmihailenco/tagparser/.travis.yml new file mode 100644 index 000000000..ec5384523 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/.travis.yml @@ -0,0 +1,24 @@ +dist: xenial +sudo: false +language: go + +go: + - 1.11.x + - 1.12.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/tagparser + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 + +script: + - make + - golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/LICENSE b/vendor/github.com/vmihailenco/tagparser/LICENSE new file mode 100644 index 000000000..3fc93fdff --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/tagparser/Makefile b/vendor/github.com/vmihailenco/tagparser/Makefile new file mode 100644 index 000000000..fe9dc5bdb --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/Makefile @@ -0,0 +1,8 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet ./... + go get github.com/gordonklaus/ineffassign + ineffassign . diff --git a/vendor/github.com/vmihailenco/tagparser/README.md b/vendor/github.com/vmihailenco/tagparser/README.md new file mode 100644 index 000000000..411aa5444 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/README.md @@ -0,0 +1,24 @@ +# Opinionated Golang tag parser + +[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) +[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) + +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/tagparser +``` + +## Quickstart + +```go +func ExampleParse() { + tag := tagparser.Parse("some_name,key:value,key2:'complex value'") + fmt.Println(tag.Name) + fmt.Println(tag.Options) + // Output: some_name + // map[key:value key2:'complex value'] +} +``` diff --git a/vendor/github.com/vmihailenco/tagparser/go.mod b/vendor/github.com/vmihailenco/tagparser/go.mod new file mode 100644 index 000000000..961a46ddb --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/go.mod @@ -0,0 +1,3 @@ +module github.com/vmihailenco/tagparser + +go 1.13 diff --git a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go new file mode 100644 index 000000000..2de1c6f7b --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p 
*Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/internal/safe.go new file mode 100644 index 000000000..870fe541f --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func BytesToString(b []byte) string { + return string(b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go new file mode 100644 index 000000000..f8bc18d91 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package internal + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. +func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/tagparser.go b/vendor/github.com/vmihailenco/tagparser/tagparser.go new file mode 100644 index 000000000..431002aef --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/tagparser.go @@ -0,0 +1,181 @@ +package tagparser + +import ( + "strings" + + "github.com/vmihailenco/tagparser/internal/parser" +) + +type Tag struct { + Name string + Options map[string]string +} + +func (t *Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func Parse(s string) *Tag { + p := &tagParser{ + Parser: parser.NewString(s), + } + p.parseKey() + return &p.Tag +} + +type tagParser struct { + *parser.Parser + + Tag Tag + hasName bool + key string +} + +func (p *tagParser) setTagOption(key, value string) { + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + + if !p.hasName { + p.hasName = true + if key == "" { + p.Tag.Name = value + return + } + } + if p.Tag.Options == nil { + p.Tag.Options = make(map[string]string) + } + if key == "" { + p.Tag.Options[value] = "" + } else { + p.Tag.Options[key] = value + } +} + +func (p *tagParser) parseKey() { + p.key = "" + + var b []byte + for p.Valid() { + c := p.Read() + switch c { + case ',': + p.Skip(' ') + p.setTagOption("", string(b)) + p.parseKey() + return + case ':': + p.key = string(b) + p.parseValue() + return + case '\'': + p.parseQuotedValue() + return + default: + b = append(b, c) + } + } + + if len(b) > 0 { + p.setTagOption("", string(b)) + } +} + +func (p *tagParser) parseValue() { + const quote = '\'' + + c := p.Peek() + if c == quote { + p.Skip(quote) + p.parseQuotedValue() + return + } + + var b []byte + for p.Valid() { + c = p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + b = p.readBrackets(b) + case ',': + p.Skip(' ') + p.setTagOption(p.key, string(b)) + p.parseKey() + return + default: + b = 
append(b, c) + } + } + p.setTagOption(p.key, string(b)) +} + +func (p *tagParser) readBrackets(b []byte) []byte { + var lvl int +loop: + for p.Valid() { + c := p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + lvl++ + case ')': + b = append(b, c) + lvl-- + if lvl < 0 { + break loop + } + default: + b = append(b, c) + } + } + return b +} + +func (p *tagParser) parseQuotedValue() { + const quote = '\'' + + var b []byte + b = append(b, quote) + + for p.Valid() { + bb, ok := p.ReadSep(quote) + if !ok { + b = append(b, bb...) + break + } + + if len(bb) > 0 && bb[len(bb)-1] == '\\' { + b = append(b, bb[:len(bb)-1]...) + b = append(b, quote) + continue + } + + b = append(b, bb...) + b = append(b, quote) + break + } + + p.setTagOption(p.key, string(b)) + if p.Skip(',') { + p.Skip(' ') + } + p.parseKey() +} + +func Unquote(s string) (string, bool) { + const quote = '\'' + + if len(s) < 2 { + return s, false + } + if s[0] == quote && s[len(s)-1] == quote { + return s[1 : len(s)-1], true + } + return s, false +} diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md index 425c122fa..21828db14 100644 --- a/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -1,5 +1,13 @@ # CHANGELOG +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + +## v1.1.0 - 2022-01-16 + +- Add SHA-512 hash generator function for convenience. + ## v1.0.2 - 2021-03-28 - Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go index d43bee607..82e8aeed8 100644 --- a/vendor/github.com/xdg-go/scram/doc.go +++ b/vendor/github.com/xdg-go/scram/doc.go @@ -10,14 +10,16 @@ // // Usage // -// The scram package provides two variables, `SHA1` and `SHA256`, that are -// used to construct Client or Server objects. +// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that +// are used to construct Client or Server objects. // // clientSHA1, err := scram.SHA1.NewClient(username, password, authID) // clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) // // serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) // serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) // // These objects are used to construct ClientConversation or // ServerConversation objects that are used to carry out authentication. 
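The `doc.go` hunk above documents the new `SHA512` hash generator alongside the existing `SHA1` and `SHA256` ones. As a rough sketch of how the new variant slots into the client flow described there (assuming the package's usual `NewConversation`/`Step` methods, which are not shown in this hunk), a SCRAM-SHA-512 exchange could start like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/xdg-go/scram"
)

func main() {
	// Build a client configured for SHA-512, mirroring the SHA-1/SHA-256
	// examples in the package documentation.
	client, err := scram.SHA512.NewClient("user", "pencil", "")
	if err != nil {
		log.Fatal(err)
	}

	// A ClientConversation carries out the challenge/response exchange;
	// stepping it with an empty challenge yields the client-first message.
	conv := client.NewConversation()
	first, err := conv.Step("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(first) // e.g. "n,,n=user,r=<client nonce>"
}
```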
diff --git a/vendor/github.com/xdg-go/scram/go.mod b/vendor/github.com/xdg-go/scram/go.mod index ad3763598..53324d05a 100644 --- a/vendor/github.com/xdg-go/scram/go.mod +++ b/vendor/github.com/xdg-go/scram/go.mod @@ -4,5 +4,5 @@ go 1.11 require ( github.com/xdg-go/pbkdf2 v1.0.0 - github.com/xdg-go/stringprep v1.0.2 + github.com/xdg-go/stringprep v1.0.3 ) diff --git a/vendor/github.com/xdg-go/scram/go.sum b/vendor/github.com/xdg-go/scram/go.sum index 04098820a..fa0938ac8 100644 --- a/vendor/github.com/xdg-go/scram/go.sum +++ b/vendor/github.com/xdg-go/scram/go.sum @@ -1,7 +1,7 @@ github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go index 927659969..a7b366027 100644 --- a/vendor/github.com/xdg-go/scram/scram.go +++ b/vendor/github.com/xdg-go/scram/scram.go @@ -9,6 +9,7 @@ package scram import ( "crypto/sha1" "crypto/sha256" + "crypto/sha512" "fmt" "hash" @@ -29,6 +30,10 @@ var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } // to create Client objects configured for SHA-256 hashing. var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } +// SHA512 is a function that returns a crypto/sha512 hasher and should be used +// to create Client objects configured for SHA-512 hashing. +var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } + // NewClient constructs a SCRAM client component based on a given hash.Hash // factory receiver. This constructor will normalize the username, password // and authzID via the SASLprep algorithm, as recommended by RFC-5802. 
If diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md index 2849637ca..e06787fba 100644 --- a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -1,5 +1,12 @@ # CHANGELOG + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + ## [v1.0.2] - 2021-03-27 diff --git a/vendor/github.com/xdg-go/stringprep/go.mod b/vendor/github.com/xdg-go/stringprep/go.mod index f57123b55..d71c7adbb 100644 --- a/vendor/github.com/xdg-go/stringprep/go.mod +++ b/vendor/github.com/xdg-go/stringprep/go.mod @@ -2,4 +2,4 @@ module github.com/xdg-go/stringprep go 1.11 -require golang.org/x/text v0.3.5 +require golang.org/x/text v0.3.7 diff --git a/vendor/github.com/xdg-go/stringprep/go.sum b/vendor/github.com/xdg-go/stringprep/go.sum index bbd33e892..2274b8038 100644 --- a/vendor/github.com/xdg-go/stringprep/go.sum +++ b/vendor/github.com/xdg-go/stringprep/go.sum @@ -1,3 +1,3 @@ -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/youmark/pkcs8/.travis.yml b/vendor/github.com/youmark/pkcs8/.travis.yml index 0bceef6fa..3608f7dc7 100644 --- a/vendor/github.com/youmark/pkcs8/.travis.yml +++ b/vendor/github.com/youmark/pkcs8/.travis.yml @@ -1,8 +1,13 @@ +arch: + - amd64 + - ppc64le language: go go: - - "1.9.x" - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" - master script: diff --git a/vendor/github.com/youmark/pkcs8/README.md b/vendor/github.com/youmark/pkcs8/README.md index f2167dbfe..ef6c76257 100644 --- a/vendor/github.com/youmark/pkcs8/README.md +++ b/vendor/github.com/youmark/pkcs8/README.md @@ -8,14 +8,15 @@ pkcs8 package fills the gap here. It implements functions to process private key [**Godoc**](http://godoc.org/github.com/youmark/pkcs8) ## Installation -Supports Go 1.9+ +Supports Go 1.10+. Release v1.1 is the last release supporting Go 1.9 ```text go get github.com/youmark/pkcs8 ``` ## dependency -This package depends on golang.org/x/crypto/pbkdf2 package. Use the following command to retrive pbkdf2 package +This package depends on golang.org/x/crypto/pbkdf2 and golang.org/x/crypto/scrypt packages. 
Use the following command to retrieve them ```text go get golang.org/x/crypto/pbkdf2 +go get golang.org/x/crypto/scrypt ``` diff --git a/vendor/github.com/youmark/pkcs8/cipher.go b/vendor/github.com/youmark/pkcs8/cipher.go new file mode 100644 index 000000000..2946c93e8 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/cipher.go @@ -0,0 +1,60 @@ +package pkcs8 + +import ( + "bytes" + "crypto/cipher" + "encoding/asn1" +) + +type cipherWithBlock struct { + oid asn1.ObjectIdentifier + ivSize int + keySize int + newBlock func(key []byte) (cipher.Block, error) +} + +func (c cipherWithBlock) IVSize() int { + return c.ivSize +} + +func (c cipherWithBlock) KeySize() int { + return c.keySize +} + +func (c cipherWithBlock) OID() asn1.ObjectIdentifier { + return c.oid +} + +func (c cipherWithBlock) Encrypt(key, iv, plaintext []byte) ([]byte, error) { + block, err := c.newBlock(key) + if err != nil { + return nil, err + } + return cbcEncrypt(block, key, iv, plaintext) +} + +func (c cipherWithBlock) Decrypt(key, iv, ciphertext []byte) ([]byte, error) { + block, err := c.newBlock(key) + if err != nil { + return nil, err + } + return cbcDecrypt(block, key, iv, ciphertext) +} + +func cbcEncrypt(block cipher.Block, key, iv, plaintext []byte) ([]byte, error) { + mode := cipher.NewCBCEncrypter(block, iv) + paddingLen := block.BlockSize() - (len(plaintext) % block.BlockSize()) + ciphertext := make([]byte, len(plaintext)+paddingLen) + copy(ciphertext, plaintext) + copy(ciphertext[len(plaintext):], bytes.Repeat([]byte{byte(paddingLen)}, paddingLen)) + mode.CryptBlocks(ciphertext, ciphertext) + return ciphertext, nil +} + +func cbcDecrypt(block cipher.Block, key, iv, ciphertext []byte) ([]byte, error) { + mode := cipher.NewCBCDecrypter(block, iv) + plaintext := make([]byte, len(ciphertext)) + mode.CryptBlocks(plaintext, ciphertext) + // TODO: remove padding + return plaintext, nil +} diff --git a/vendor/github.com/youmark/pkcs8/cipher_3des.go b/vendor/github.com/youmark/pkcs8/cipher_3des.go new file mode 100644 index 000000000..562966440 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/cipher_3des.go @@ -0,0 +1,24 @@ +package pkcs8 + +import ( + "crypto/des" + "encoding/asn1" +) + +var ( + oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} +) + +func init() { + RegisterCipher(oidDESEDE3CBC, func() Cipher { + return TripleDESCBC + }) +} + +// TripleDESCBC is the 168-bit key 3DES cipher in CBC mode. 
+var TripleDESCBC = cipherWithBlock{ + ivSize: des.BlockSize, + keySize: 24, + newBlock: des.NewTripleDESCipher, + oid: oidDESEDE3CBC, +} diff --git a/vendor/github.com/youmark/pkcs8/cipher_aes.go b/vendor/github.com/youmark/pkcs8/cipher_aes.go new file mode 100644 index 000000000..c0372d1ee --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/cipher_aes.go @@ -0,0 +1,84 @@ +package pkcs8 + +import ( + "crypto/aes" + "encoding/asn1" +) + +var ( + oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + oidAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} + oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22} + oidAES192GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 26} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + oidAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} +) + +func init() { + RegisterCipher(oidAES128CBC, func() Cipher { + return AES128CBC + }) + RegisterCipher(oidAES128GCM, func() Cipher { + return AES128GCM + }) + RegisterCipher(oidAES192CBC, func() Cipher { + return AES192CBC + }) + RegisterCipher(oidAES192GCM, func() Cipher { + return AES192GCM + }) + RegisterCipher(oidAES256CBC, func() Cipher { + return AES256CBC + }) + RegisterCipher(oidAES256GCM, func() Cipher { + return AES256GCM + }) +} + +// AES128CBC is the 128-bit key AES cipher in CBC mode. +var AES128CBC = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 16, + newBlock: aes.NewCipher, + oid: oidAES128CBC, +} + +// AES128GCM is the 128-bit key AES cipher in GCM mode. +var AES128GCM = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 16, + newBlock: aes.NewCipher, + oid: oidAES128GCM, +} + +// AES192CBC is the 192-bit key AES cipher in CBC mode. +var AES192CBC = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 24, + newBlock: aes.NewCipher, + oid: oidAES192CBC, +} + +// AES192GCM is the 912-bit key AES cipher in GCM mode. +var AES192GCM = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 24, + newBlock: aes.NewCipher, + oid: oidAES192GCM, +} + +// AES256CBC is the 256-bit key AES cipher in CBC mode. +var AES256CBC = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 32, + newBlock: aes.NewCipher, + oid: oidAES256CBC, +} + +// AES256GCM is the 256-bit key AES cipher in GCM mode. 
+var AES256GCM = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 32, + newBlock: aes.NewCipher, + oid: oidAES256GCM, +} diff --git a/vendor/github.com/youmark/pkcs8/go.mod b/vendor/github.com/youmark/pkcs8/go.mod new file mode 100644 index 000000000..1fda53404 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/go.mod @@ -0,0 +1,5 @@ +module github.com/youmark/pkcs8 + +go 1.12 + +require golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 diff --git a/vendor/github.com/youmark/pkcs8/go.sum b/vendor/github.com/youmark/pkcs8/go.sum new file mode 100644 index 000000000..d59db129c --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/go.sum @@ -0,0 +1,7 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go new file mode 100644 index 000000000..79697dd82 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go @@ -0,0 +1,91 @@ +package pkcs8 + +import ( + "crypto" + "crypto/sha1" + "crypto/sha256" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "hash" + + "golang.org/x/crypto/pbkdf2" +) + +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidHMACWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 7} + oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} +) + +func init() { + RegisterKDF(oidPKCS5PBKDF2, func() KDFParameters { + return new(pbkdf2Params) + }) +} + +func newHashFromPRF(ai pkix.AlgorithmIdentifier) (func() hash.Hash, error) { + switch { + case len(ai.Algorithm) == 0 || ai.Algorithm.Equal(oidHMACWithSHA1): + return sha1.New, nil + case ai.Algorithm.Equal(oidHMACWithSHA256): + return sha256.New, nil + default: + return nil, errors.New("pkcs8: unsupported hash function") + } +} + +func newPRFParamFromHash(h crypto.Hash) (pkix.AlgorithmIdentifier, error) { + switch h { + case crypto.SHA1: + return pkix.AlgorithmIdentifier{ + Algorithm: oidHMACWithSHA1, + Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil + case crypto.SHA256: + return pkix.AlgorithmIdentifier{ + Algorithm: oidHMACWithSHA256, + Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil + } + return pkix.AlgorithmIdentifier{}, errors.New("pkcs8: unsupported hash function") +} + +type pbkdf2Params struct { + Salt []byte + IterationCount int + PRF pkix.AlgorithmIdentifier `asn1:"optional"` +} + +func (p pbkdf2Params) DeriveKey(password []byte, size int) (key []byte, err error) { + h, err := newHashFromPRF(p.PRF) + if err != nil { + return nil, err + } + return pbkdf2.Key(password, p.Salt, p.IterationCount, size, h), nil +} + +// PBKDF2Opts contains options for the PBKDF2 key derivation function. 
+type PBKDF2Opts struct { + SaltSize int + IterationCount int + HMACHash crypto.Hash +} + +func (p PBKDF2Opts) DeriveKey(password, salt []byte, size int) ( + key []byte, params KDFParameters, err error) { + + key = pbkdf2.Key(password, salt, p.IterationCount, size, p.HMACHash.New) + prfParam, err := newPRFParamFromHash(p.HMACHash) + if err != nil { + return nil, nil, err + } + params = pbkdf2Params{salt, p.IterationCount, prfParam} + return key, params, nil +} + +func (p PBKDF2Opts) GetSaltSize() int { + return p.SaltSize +} + +func (p PBKDF2Opts) OID() asn1.ObjectIdentifier { + return oidPKCS5PBKDF2 +} diff --git a/vendor/github.com/youmark/pkcs8/kdf_scrypt.go b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go new file mode 100644 index 000000000..36c4f4f59 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go @@ -0,0 +1,62 @@ +package pkcs8 + +import ( + "encoding/asn1" + + "golang.org/x/crypto/scrypt" +) + +var ( + oidScrypt = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 4, 11} +) + +func init() { + RegisterKDF(oidScrypt, func() KDFParameters { + return new(scryptParams) + }) +} + +type scryptParams struct { + Salt []byte + CostParameter int + BlockSize int + ParallelizationParameter int +} + +func (p scryptParams) DeriveKey(password []byte, size int) (key []byte, err error) { + return scrypt.Key(password, p.Salt, p.CostParameter, p.BlockSize, + p.ParallelizationParameter, size) +} + +// ScryptOpts contains options for the scrypt key derivation function. +type ScryptOpts struct { + SaltSize int + CostParameter int + BlockSize int + ParallelizationParameter int +} + +func (p ScryptOpts) DeriveKey(password, salt []byte, size int) ( + key []byte, params KDFParameters, err error) { + + key, err = scrypt.Key(password, salt, p.CostParameter, p.BlockSize, + p.ParallelizationParameter, size) + if err != nil { + return nil, nil, err + } + params = scryptParams{ + BlockSize: p.BlockSize, + CostParameter: p.CostParameter, + ParallelizationParameter: p.ParallelizationParameter, + Salt: salt, + } + return key, params, nil +} + +func (p ScryptOpts) GetSaltSize() int { + return p.SaltSize +} + +func (p ScryptOpts) OID() asn1.ObjectIdentifier { + return oidScrypt +} diff --git a/vendor/github.com/youmark/pkcs8/pkcs8.go b/vendor/github.com/youmark/pkcs8/pkcs8.go index 9270a7970..f27f62752 100644 --- a/vendor/github.com/youmark/pkcs8/pkcs8.go +++ b/vendor/github.com/youmark/pkcs8/pkcs8.go @@ -2,304 +2,308 @@ package pkcs8 import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" + "crypto" "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" "crypto/rsa" - "crypto/sha1" - "crypto/sha256" "crypto/x509" + "crypto/x509/pkix" "encoding/asn1" "errors" - - "golang.org/x/crypto/pbkdf2" -) - -// Copy from crypto/x509 -var ( - oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} - oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + "fmt" ) -// Copy from crypto/x509 -var ( - oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} - oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} - oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} - oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} -) +// DefaultOpts are the default options for encrypting a key if none are given. +// The defaults can be changed by the library user. 
+var DefaultOpts = &Opts{ + Cipher: AES256CBC, + KDFOpts: PBKDF2Opts{ + SaltSize: 8, + IterationCount: 10000, + HMACHash: crypto.SHA256, + }, +} -// Copy from crypto/x509 -func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { - switch curve { - case elliptic.P224(): - return oidNamedCurveP224, true - case elliptic.P256(): - return oidNamedCurveP256, true - case elliptic.P384(): - return oidNamedCurveP384, true - case elliptic.P521(): - return oidNamedCurveP521, true - } +// KDFOpts contains options for a key derivation function. +// An implementation of this interface must be specified when encrypting a PKCS#8 key. +type KDFOpts interface { + // DeriveKey derives a key of size bytes from the given password and salt. + // It returns the key and the ASN.1-encodable parameters used. + DeriveKey(password, salt []byte, size int) (key []byte, params KDFParameters, err error) + // GetSaltSize returns the salt size specified. + GetSaltSize() int + // OID returns the OID of the KDF specified. + OID() asn1.ObjectIdentifier +} - return nil, false +// KDFParameters contains parameters (salt, etc.) for a key deriviation function. +// It must be a ASN.1-decodable structure. +// An implementation of this interface is created when decoding an encrypted PKCS#8 key. +type KDFParameters interface { + // DeriveKey derives a key of size bytes from the given password. + // It uses the salt from the decoded parameters. + DeriveKey(password []byte, size int) (key []byte, err error) } -// Unecrypted PKCS8 -var ( - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} - oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) +var kdfs = make(map[string]func() KDFParameters) -type ecPrivateKey struct { - Version int - PrivateKey []byte - NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` - PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +// RegisterKDF registers a function that returns a new instance of the given KDF +// parameters. This allows the library to support client-provided KDFs. +func RegisterKDF(oid asn1.ObjectIdentifier, params func() KDFParameters) { + kdfs[oid.String()] = params } -type privateKeyInfo struct { - Version int - PrivateKeyAlgorithm []asn1.ObjectIdentifier - PrivateKey []byte +// Cipher represents a cipher for encrypting the key material. +type Cipher interface { + // IVSize returns the IV size of the cipher, in bytes. + IVSize() int + // KeySize returns the key size of the cipher, in bytes. + KeySize() int + // Encrypt encrypts the key material. + Encrypt(key, iv, plaintext []byte) ([]byte, error) + // Decrypt decrypts the key material. + Decrypt(key, iv, ciphertext []byte) ([]byte, error) + // OID returns the OID of the cipher specified. + OID() asn1.ObjectIdentifier } -// Encrypted PKCS8 -type prfParam struct { - IdPRF asn1.ObjectIdentifier - NullParam asn1.RawValue -} +var ciphers = make(map[string]func() Cipher) -type pbkdf2Params struct { - Salt []byte - IterationCount int - PrfParam prfParam `asn1:"optional"` +// RegisterCipher registers a function that returns a new instance of the given +// cipher. This allows the library to support client-provided ciphers. 
+func RegisterCipher(oid asn1.ObjectIdentifier, cipher func() Cipher) { + ciphers[oid.String()] = cipher } -type pbkdf2Algorithms struct { - IdPBKDF2 asn1.ObjectIdentifier - PBKDF2Params pbkdf2Params +// Opts contains options for encrypting a PKCS#8 key. +type Opts struct { + Cipher Cipher + KDFOpts KDFOpts } -type pbkdf2Encs struct { - EncryAlgo asn1.ObjectIdentifier - IV []byte -} +// Unecrypted PKCS8 +var ( + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} +) -type pbes2Params struct { - KeyDerivationFunc pbkdf2Algorithms - EncryptionScheme pbkdf2Encs +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedData []byte } -type pbes2Algorithms struct { - IdPBES2 asn1.ObjectIdentifier - PBES2Params pbes2Params +type pbes2Params struct { + KeyDerivationFunc pkix.AlgorithmIdentifier + EncryptionScheme pkix.AlgorithmIdentifier } -type encryptedPrivateKeyInfo struct { - EncryptionAlgorithm pbes2Algorithms - EncryptedData []byte +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm pkix.AlgorithmIdentifier + PrivateKey []byte } -// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*rsa.PrivateKey) +func parseKeyDerivationFunc(keyDerivationFunc pkix.AlgorithmIdentifier) (KDFParameters, error) { + oid := keyDerivationFunc.Algorithm.String() + newParams, ok := kdfs[oid] if !ok { - return nil, errors.New("key block is not of type RSA") + return nil, fmt.Errorf("pkcs8: unsupported KDF (OID: %s)", oid) } - return typedKey, nil -} - -// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) + params := newParams() + _, err := asn1.Unmarshal(keyDerivationFunc.Parameters.FullBytes, params) if err != nil { - return nil, err + return nil, errors.New("pkcs8: invalid KDF parameters") } - typedKey, ok := key.(*ecdsa.PrivateKey) + return params, nil +} + +func parseEncryptionScheme(encryptionScheme pkix.AlgorithmIdentifier) (Cipher, []byte, error) { + oid := encryptionScheme.Algorithm.String() + newCipher, ok := ciphers[oid] if !ok { - return nil, errors.New("key block is not of type ECDSA") + return nil, nil, fmt.Errorf("pkcs8: unsupported cipher (OID: %s)", oid) } - return typedKey, nil + cipher := newCipher() + var iv []byte + if _, err := asn1.Unmarshal(encryptionScheme.Parameters.FullBytes, &iv); err != nil { + return nil, nil, errors.New("pkcs8: invalid cipher parameters") + } + return cipher, iv, nil } -// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. 
-// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { +// ParsePrivateKey parses a DER-encoded PKCS#8 private key. +// Password can be nil. +// This is equivalent to ParsePKCS8PrivateKey. +func ParsePrivateKey(der []byte, password []byte) (interface{}, KDFParameters, error) { // No password provided, assume the private key is unencrypted - if v == nil { - return x509.ParsePKCS8PrivateKey(der) + if len(password) == 0 { + privateKey, err := x509.ParsePKCS8PrivateKey(der) + return privateKey, nil, err } // Use the password provided to decrypt the private key - password := v[0] var privKey encryptedPrivateKeyInfo if _, err := asn1.Unmarshal(der, &privKey); err != nil { - return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + return nil, nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") } - if !privKey.EncryptionAlgorithm.IdPBES2.Equal(oidPBES2) { - return nil, errors.New("pkcs8: only PBES2 supported") + if !privKey.EncryptionAlgorithm.Algorithm.Equal(oidPBES2) { + return nil, nil, errors.New("pkcs8: only PBES2 supported") } - if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IdPBKDF2.Equal(oidPKCS5PBKDF2) { - return nil, errors.New("pkcs8: only PBKDF2 supported") + var params pbes2Params + if _, err := asn1.Unmarshal(privKey.EncryptionAlgorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, nil, errors.New("pkcs8: invalid PBES2 parameters") } - encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme - kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + cipher, iv, err := parseEncryptionScheme(params.EncryptionScheme) + if err != nil { + return nil, nil, err + } - iv := encParam.IV - salt := kdfParam.Salt - iter := kdfParam.IterationCount - keyHash := sha1.New - if kdfParam.PrfParam.IdPRF.Equal(oidHMACWithSHA256) { - keyHash = sha256.New + kdfParams, err := parseKeyDerivationFunc(params.KeyDerivationFunc) + if err != nil { + return nil, nil, err } - encryptedKey := privKey.EncryptedData - var symkey []byte - var block cipher.Block - var err error - switch { - case encParam.EncryAlgo.Equal(oidAES128CBC): - symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES256CBC): - symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidDESEDE3CBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = des.NewTripleDESCipher(symkey) - default: - return nil, errors.New("pkcs8: only AES-256-CBC, AES-128-CBC and DES-EDE3-CBC are supported") + keySize := cipher.KeySize() + symkey, err := kdfParams.DeriveKey(password, keySize) + if err != nil { + return nil, nil, err } + + encryptedKey := privKey.EncryptedData + decryptedKey, err := cipher.Decrypt(symkey, iv, encryptedKey) if err != nil { - return nil, err + return nil, nil, err } - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(encryptedKey, encryptedKey) - key, err := x509.ParsePKCS8PrivateKey(encryptedKey) + key, err := x509.ParsePKCS8PrivateKey(decryptedKey) if err != nil { - return nil, errors.New("pkcs8: incorrect password") + return nil, nil, errors.New("pkcs8: incorrect password") } - return key, nil + return key, kdfParams, nil } -func convertPrivateKeyToPKCS8(priv interface{}) ([]byte, error) { - var pkey privateKeyInfo - - switch priv := priv.(type) { - case 
*ecdsa.PrivateKey: - eckey, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return nil, err - } - - oidNamedCurve, ok := oidFromNamedCurve(priv.Curve) - if !ok { - return nil, errors.New("pkcs8: unknown elliptic curve") - } - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). - // But openssl set to v1 even publicKey is present - pkey.Version = 1 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA - pkey.PrivateKeyAlgorithm[1] = oidNamedCurve - pkey.PrivateKey = eckey - case *rsa.PrivateKey: - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). - // But openssl set to v1 even publicKey is present - pkey.Version = 0 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA - pkey.PrivateKey = x509.MarshalPKCS1PrivateKey(priv) - } - - return asn1.Marshal(pkey) -} +// MarshalPrivateKey encodes a private key into DER-encoded PKCS#8 with the given options. +// Password can be nil. +func MarshalPrivateKey(priv interface{}, password []byte, opts *Opts) ([]byte, error) { + if len(password) == 0 { + return x509.MarshalPKCS8PrivateKey(priv) + } + + if opts == nil { + opts = DefaultOpts + } -func convertPrivateKeyToPKCS8Encrypted(priv interface{}, password []byte) ([]byte, error) { // Convert private key into PKCS8 format - pkey, err := convertPrivateKeyToPKCS8(priv) + pkey, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return nil, err } - // Calculate key from password based on PKCS5 algorithm - // Use 8 byte salt, 16 byte IV, and 2048 iteration - iter := 2048 - salt := make([]byte, 8) - iv := make([]byte, 16) + encAlg := opts.Cipher + salt := make([]byte, opts.KDFOpts.GetSaltSize()) _, err = rand.Read(salt) if err != nil { return nil, err } + iv := make([]byte, encAlg.IVSize()) _, err = rand.Read(iv) if err != nil { return nil, err } + key, kdfParams, err := opts.KDFOpts.DeriveKey(password, salt, encAlg.KeySize()) + if err != nil { + return nil, err + } - key := pbkdf2.Key(password, salt, iter, 32, sha256.New) + encryptedKey, err := encAlg.Encrypt(key, iv, pkey) + if err != nil { + return nil, err + } - // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme - padding := aes.BlockSize - len(pkey)%aes.BlockSize - if padding > 0 { - n := len(pkey) - pkey = append(pkey, make([]byte, padding)...) 
- for i := 0; i < padding; i++ { - pkey[n+i] = byte(padding) - } + marshalledParams, err := asn1.Marshal(kdfParams) + if err != nil { + return nil, err + } + keyDerivationFunc := pkix.AlgorithmIdentifier{ + Algorithm: opts.KDFOpts.OID(), + Parameters: asn1.RawValue{FullBytes: marshalledParams}, + } + marshalledIV, err := asn1.Marshal(iv) + if err != nil { + return nil, err + } + encryptionScheme := pkix.AlgorithmIdentifier{ + Algorithm: encAlg.OID(), + Parameters: asn1.RawValue{FullBytes: marshalledIV}, } - encryptedKey := make([]byte, len(pkey)) - block, err := aes.NewCipher(key) + encryptionAlgorithmParams := pbes2Params{ + EncryptionScheme: encryptionScheme, + KeyDerivationFunc: keyDerivationFunc, + } + marshalledEncryptionAlgorithmParams, err := asn1.Marshal(encryptionAlgorithmParams) if err != nil { return nil, err } - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(encryptedKey, pkey) + encryptionAlgorithm := pkix.AlgorithmIdentifier{ + Algorithm: oidPBES2, + Parameters: asn1.RawValue{FullBytes: marshalledEncryptionAlgorithmParams}, + } - // pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256}}} + encryptedPkey := encryptedPrivateKeyInfo{ + EncryptionAlgorithm: encryptionAlgorithm, + EncryptedData: encryptedKey, + } - pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256, asn1.RawValue{Tag: asn1.TagNull}}}} - pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} - pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + return asn1.Marshal(encryptedPkey) +} - encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} +// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { + var password []byte + if len(v) > 0 { + password = v[0] + } + privateKey, _, err := ParsePrivateKey(der, password) + return privateKey, err +} - return asn1.Marshal(encryptedPkey) +// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type RSA") + } + return typedKey, nil +} + +// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type ECDSA") + } + return typedKey, nil } // ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format. // To encrypt the private key, the password of []byte type should be provided as the second parameter. 
// -// The only supported key types are RSA and ECDSA (*rsa.PublicKey or *ecdsa.PublicKey for priv) +// The only supported key types are RSA and ECDSA (*rsa.PrivateKey or *ecdsa.PrivateKey for priv) func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) { - if v == nil { - return convertPrivateKeyToPKCS8(priv) + var password []byte + if len(v) > 0 { + password = v[0] } - - password := string(v[0]) - return convertPrivateKeyToPKCS8Encrypted(priv, []byte(password)) + return MarshalPrivateKey(priv, password, nil) } diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index 7b3d1196c..603901732 100644 --- a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -52,6 +52,51 @@ func (t primitiveType) GoString() string { } } +// rawNumberEqual is our cty-specific definition of whether two big floats +// underlying cty.Number are "equal" for the purposes of the Value.Equals and +// Value.RawEquals methods. +// +// The built-in equality for big.Float is a direct comparison of the mantissa +// bits and the exponent, but that's too precise a check for cty because we +// routinely send numbers through decimal approximations and back and so +// we only promise to accurately represent the subset of binary floating point +// numbers that can be derived from a decimal string representation. +// +// In respect of the fact that cty only tries to preserve numbers that can +// reasonably be written in JSON documents, we use the string representation of +// a decimal approximation of the number as our comparison, relying on the +// big.Float type's heuristic for discarding extraneous mantissa bits that seem +// likely to only be there as a result of an earlier decimal-to-binary +// approximation during parsing, e.g. in ParseNumberVal. +func rawNumberEqual(a, b *big.Float) bool { + switch { + case (a == nil) != (b == nil): + return false + case a == nil: // b == nil too then, due to previous case + return true + default: + // This format and precision matches that used by cty/json.Marshal, + // and thus achieves our definition of "two numbers are equal if + // we'd use the same JSON serialization for both of them". + const format = 'f' + const prec = -1 + aStr := a.Text(format, prec) + bStr := b.Text(format, prec) + + // The one exception to our rule about equality-by-stringification is + // negative zero, because we want -0 to always be equal to +0. + const posZero = "0" + const negZero = "-0" + if aStr == negZero { + aStr = posZero + } + if bStr == negZero { + bStr = posZero + } + return aStr == bStr + } +} + // Number is the numeric type. Number values are arbitrary-precision // decimal numbers, which can then be converted into Go's various numeric // types only if they are in the appropriate range. 
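The `rawNumberEqual` comment above defines number equality as comparison of shortest decimal representations, with `-0` folded into `0`. Below is a minimal sketch of that rule, using a hypothetical `stringEqual` helper to stand in for the unexported function and assuming `big.Float.Text('f', -1)` yields the shortest round-tripping decimal string:

```go
package main

import (
	"fmt"
	"math/big"
)

// stringEqual mirrors the comparison rule described in the comment above:
// compare shortest decimal representations and treat -0 and +0 as equal.
func stringEqual(a, b *big.Float) bool {
	aStr, bStr := a.Text('f', -1), b.Text('f', -1)
	if aStr == "-0" {
		aStr = "0"
	}
	if bStr == "-0" {
		bStr = "0"
	}
	return aStr == bStr
}

func main() {
	// The same decimal string parsed at two different precisions gives two
	// distinct binary approximations...
	a, _, _ := big.ParseFloat("0.1", 10, 53, big.ToNearestEven)
	b, _, _ := big.ParseFloat("0.1", 10, 64, big.ToNearestEven)

	fmt.Println(a.Cmp(b) == 0)     // false: the mantissas differ
	fmt.Println(stringEqual(a, b)) // true: both render as "0.1"
}
```

This string-based rule is what the `value_ops.go` hunk below switches `Value.Equals` to for numbers.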
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go index 55bebe036..cdcc1506f 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -191,7 +191,7 @@ func (val Value) Equals(other Value) Value { switch { case ty == Number: - result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + result = rawNumberEqual(val.v.(*big.Float), other.v.(*big.Float)) case ty == Bool: result = val.v.(bool) == other.v.(bool) case ty == String: diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go index 0c739dd0a..95ffc1078 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -7,8 +7,6 @@ // Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer // See THIRD-PARTY-NOTICES for original license terms. -// +build go1.9 - package bson // import "go.mongodb.org/mongo-driver/bson" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go deleted file mode 100644 index bbe779284..000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// +build !go1.9 - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "math" - "strconv" - "strings" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D []E - -// Map creates a map from the elements of the D. -func (d D) Map() M { - m := make(M, len(d)) - for _, e := range d { - m[e.Key] = e.Value - } - return m -} - -// E represents a BSON element for a D. It is usually used inside a D. -type E struct { - Key string - Value interface{} -} - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M map[string]interface{} - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A []interface{} - -func formatDouble(f float64) string { - var s string - if math.IsInf(f, 1) { - s = "Infinity" - } else if math.IsInf(f, -1) { - s = "-Infinity" - } else if math.IsNaN(f) { - s = "NaN" - } else { - // Print exactly one decimalType place for integers; otherwise, print as many are necessary to - // perfectly represent it. 
- s = strconv.FormatFloat(f, 'G', -1, 64) - if !strings.ContainsRune(s, '.') { - s += ".0" - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 2c861b5cd..96195bcc9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -43,7 +43,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 32fd14278..20f4797dd 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -53,7 +53,7 @@ type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with // the provided RegistryBuilder. // -// There is no support for decoding map[string]interface{} becuase there is no decoder for +// There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { @@ -1504,6 +1504,18 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson return err } + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". 
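As an aside, the encoding/json behavior that the new comment above refers to (a JSON null decoded into a pointer field results in a nil pointer) can be reproduced with a few lines of standard-library code; this sketch is illustrative only and not part of the driver:

package main

import (
	"encoding/json"
	"fmt"
)

type wrapper struct {
	Field *int `json:"field"`
}

func main() {
	x := 5
	w := wrapper{Field: &x}

	// Unmarshalling a JSON null into a pointer sets the pointer to nil,
	// which is the behavior the BSON decoder path above mirrors for empty
	// BSON values decoded into pointer-typed Go values.
	if err := json.Unmarshal([]byte(`{"field": null}`), &w); err != nil {
		panic(err)
	}
	fmt.Println(w.Field == nil) // true
}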
+ if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] if !errVal.IsNil() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 49a0c3f14..6bdb43cb4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -30,7 +30,7 @@ var errInvalidValue = errors.New("cannot encode invalid element") var sliceWriterPool = sync.Pool{ New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0, 0) + sw := make(bsonrw.SliceWriter, 0) return &sw }, } @@ -333,14 +333,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum continue } - if enc, ok := currEncoder.(ValueEncoder); ok { - err = enc.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - continue - } - err = encoder.EncodeValue(ec, vw, currVal) + err = currEncoder.EncodeValue(ec, vw, currVal) if err != nil { return err } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index fbb8ef427..1f7acbcf1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -126,14 +126,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v continue } - if enc, ok := currEncoder.(ValueEncoder); ok { - err = enc.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - continue - } - err = encoder.EncodeValue(ec, vw, currVal) + err = currEncoder.EncodeValue(ec, vw, currVal) if err != nil { return err } @@ -265,17 +258,15 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, case reflect.String: keyVal = reflect.ValueOf(key).Convert(keyType) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s := string(key) - n, parseErr := strconv.ParseInt(s, 10, 64) + n, parseErr := strconv.ParseInt(key, 10, 64) if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", s) + err = fmt.Errorf("failed to unmarshal number key %v", key) } keyVal = reflect.ValueOf(n).Convert(keyType) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s := string(key) - n, parseErr := strconv.ParseUint(s, 10, 64) + n, parseErr := strconv.ParseUint(key, 10, 64) if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", s) + err = fmt.Errorf("failed to unmarshal number key %v", key) break } keyVal = reflect.ValueOf(n).Convert(keyType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 60abffb24..f6f3800d4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -54,12 +54,6 @@ func (entme ErrNoTypeMapEntry) Error() string { // ErrNotInterface is returned when the provided type is not an interface. 
var ErrNotInterface = errors.New("The provided type is not an interface") -var defaultRegistry *Registry - -func init() { - defaultRegistry = buildDefaultRegistry() -} - // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. type RegistryBuilder struct { @@ -304,7 +298,7 @@ func (rb *RegistryBuilder) Build() *Registry { return registry } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precendence works as follows: +// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: // // 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using // RegisterTypeEncoder for the interface will be selected. @@ -374,7 +368,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value // in interfaceEncoders defaultEnc, found := r.lookupInterfaceEncoder(t, false) if !found { - defaultEnc, _ = r.kindEncoders[t.Kind()] + defaultEnc = r.kindEncoders[t.Kind()] } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -382,7 +376,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precendence works as follows: +// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: // // 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using // RegisterTypeDecoder for the interface will be selected. @@ -445,7 +439,7 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // in interfaceDecoders defaultDec, found := r.lookupInterfaceDecoder(t, false) if !found { - defaultDec, _ = r.kindDecoders[t.Kind()] + defaultDec = r.kindDecoders[t.Kind()] } return newCondAddrDecoder(idec.vd, defaultDec), true } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 9ce901782..be3f2081e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -331,14 +331,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) } - if decoder, ok := fd.decoder.(ValueDecoder); ok { - err = decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - continue - } - err = fd.decoder.DecodeValue(dctx, vr, field) + err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) if err != nil { return newDecodeError(fd.name, err) } @@ -567,7 +560,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } dominant, ok := dominantField(fields[i : i+advance]) if !ok || !sc.OverwriteDuplicatedInlinedFields { - return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), name) + return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) sd.fm[name] = dominant diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index fb5b51084..07f4b70e6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -16,36 +16,12 @@ 
import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -var ptBool = reflect.TypeOf((*bool)(nil)) -var ptInt8 = reflect.TypeOf((*int8)(nil)) -var ptInt16 = reflect.TypeOf((*int16)(nil)) -var ptInt32 = reflect.TypeOf((*int32)(nil)) -var ptInt64 = reflect.TypeOf((*int64)(nil)) -var ptInt = reflect.TypeOf((*int)(nil)) -var ptUint8 = reflect.TypeOf((*uint8)(nil)) -var ptUint16 = reflect.TypeOf((*uint16)(nil)) -var ptUint32 = reflect.TypeOf((*uint32)(nil)) -var ptUint64 = reflect.TypeOf((*uint64)(nil)) -var ptUint = reflect.TypeOf((*uint)(nil)) -var ptFloat32 = reflect.TypeOf((*float32)(nil)) -var ptFloat64 = reflect.TypeOf((*float64)(nil)) -var ptString = reflect.TypeOf((*string)(nil)) - var tBool = reflect.TypeOf(false) -var tFloat32 = reflect.TypeOf(float32(0)) var tFloat64 = reflect.TypeOf(float64(0)) -var tInt = reflect.TypeOf(int(0)) -var tInt8 = reflect.TypeOf(int8(0)) -var tInt16 = reflect.TypeOf(int16(0)) var tInt32 = reflect.TypeOf(int32(0)) var tInt64 = reflect.TypeOf(int64(0)) var tString = reflect.TypeOf("") var tTime = reflect.TypeOf(time.Time{}) -var tUint = reflect.TypeOf(uint(0)) -var tUint8 = reflect.TypeOf(uint8(0)) -var tUint16 = reflect.TypeOf(uint16(0)) -var tUint32 = reflect.TypeOf(uint32(0)) -var tUint64 = reflect.TypeOf(uint64(0)) var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() var tByteSlice = reflect.TypeOf([]byte(nil)) @@ -74,7 +50,6 @@ var tDecimal = reflect.TypeOf(primitive.Decimal128{}) var tMinKey = reflect.TypeOf(primitive.MinKey{}) var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) var tD = reflect.TypeOf(primitive.D{}) -var tM = reflect.TypeOf(primitive.M{}) var tA = reflect.TypeOf(primitive.A{}) var tE = reflect.TypeOf(primitive.E{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 8a690e37c..54c76bf74 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -423,7 +423,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { if ejp.canonical { return nil, invalidJSONErrorForType("object", t) } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t) + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 3af6fcf01..969570424 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -19,7 +19,7 @@ import ( ) func wrapperKeyBSONType(key string) bsontype.Type { - switch string(key) { + switch key { case "$numberInt": return bsontype.Int32 case "$numberLong": @@ -269,7 +269,7 @@ func (ejv *extJSONValue) parseDouble() (float64, error) { return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t) } - switch string(ejv.v.(string)) { + switch ejv.v.(string) { case "Infinity": return math.Inf(1), nil case "-Infinity": @@ -364,7 +364,7 @@ func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) { for i, key := range regexObj.keys { val := regexObj.values[i] - switch string(key) { + switch key { case "pattern": if patFound { return "", "", errors.New("duplicate pattern key in $regularExpression") diff --git 
a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 605e41a13..99ed524b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -10,7 +10,6 @@ import ( "bytes" "encoding/base64" "fmt" - "go.mongodb.org/mongo-driver/bson/primitive" "io" "math" "sort" @@ -19,13 +18,9 @@ import ( "sync" "time" "unicode/utf8" -) -var ejvwPool = sync.Pool{ - New: func() interface{} { - return new(extJSONValueWriter) - }, -} + "go.mongodb.org/mongo-driver/bson/primitive" +) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. type ExtJSONValueWriterPool struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 55378093a..458588b6b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -384,9 +384,13 @@ func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) { if err != nil { return nil, 0, err } + // Make a copy of the returned byte slice because it's just a subslice from the valueReader's + // buffer and is not safe to return in the unmarshaled value. + cp := make([]byte, len(b)) + copy(cp, b) vr.pop() - return b, btype, nil + return cp, btype, nil } func (vr *valueReader) ReadBoolean() (bool, error) { @@ -737,6 +741,9 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return vr, nil } +// readBytes reads length bytes from the valueReader starting at the current offset. Note that the +// returned byte slice is a subslice from the valueReader buffer and must be converted or copied +// before returning in an unmarshaled value. func (vr *valueReader) readBytes(length int32) ([]byte, error) { if length < 0 { return nil, fmt.Errorf("invalid length: %d", length) @@ -748,6 +755,7 @@ func (vr *valueReader) readBytes(length int32) ([]byte, error) { start := vr.offset vr.offset += int64(length) + return vr.d[start : start+int64(length)], nil } @@ -790,16 +798,6 @@ func (vr *valueReader) readCString() (string, error) { return string(vr.d[start : start+int64(idx)]), nil } -func (vr *valueReader) skipCString() error { - idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) - if idx < 0 { - return io.EOF - } - // idx does not include the null byte - vr.offset += int64(idx) + 1 - return nil -} - func (vr *valueReader) readString() (string, error) { length, err := vr.readLength() if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a39c4ea4c..f95a08afd 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -529,7 +529,7 @@ func (vw *valueWriter) WriteDocumentEnd() error { vw.pop() if vw.stack[vw.frame].mode == mCodeWithScope { - // We ignore the error here because of the gaurantee of writeLength. + // We ignore the error here because of the guarantee of writeLength. // See the docs for writeLength for more info. 
_ = vw.writeLength() vw.pop() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index 7644df129..dff65f87f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -76,27 +76,3 @@ func (sw *SliceWriter) Write(p []byte) (int, error) { *sw = append(*sw, p...) return written, nil } - -type writer []byte - -func (w *writer) Write(p []byte) (int, error) { - index := len(*w) - return w.WriteAt(p, int64(index)) -} - -func (w *writer) WriteAt(p []byte, off int64) (int, error) { - newend := off + int64(len(p)) - if newend < int64(len(*w)) { - newend = int64(len(*w)) - } - - if newend > int64(cap(*w)) { - buf := make([]byte, int64(2*cap(*w))+newend) - copy(buf, *w) - *w = buf - } - - *w = []byte(*w)[:newend] - copy([]byte(*w)[off:], p) - return len(p), nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 63a59ca08..7c91ae518 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -38,6 +38,8 @@ const ( BinaryUUIDOld byte = 0x03 BinaryUUID byte = 0x04 BinaryMD5 byte = 0x05 + BinaryEncrypted byte = 0x06 + BinaryColumn byte = 0x07 BinaryUserDefined byte = 0x80 ) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 094be934f..5e3825a23 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -118,7 +118,7 @@ // types, this tag is ignored. // // 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be trucated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, +// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be // decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index 8d8fc8ce1..db8d8ee92 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -7,6 +7,9 @@ package bson import ( + "bytes" + "encoding/json" + "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -221,3 +224,25 @@ func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val return *sw, nil } + +// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst. +func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return json.Indent(dst, src, prefix, indent) +} + +// MarshalExtJSONIndent returns the extended JSON encoding of val with each line with prefixed +// and indented. 
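For reference, the MarshalExtJSONIndent helper added in this update can be used roughly as follows; this is a minimal usage sketch, and the exact JSON layout depends on the canonical and escapeHTML arguments:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc := bson.D{{Key: "name", Value: "alice"}, {Key: "answer", Value: 42}}

	// Relaxed extended JSON (canonical=false), no HTML escaping, no prefix,
	// two-space indentation.
	out, err := bson.MarshalExtJSONIndent(doc, false, false, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}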
+func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) { + marshaled, err := MarshalExtJSON(val, canonical, escapeHTML) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = IndentExtJSON(&buf, marshaled, prefix, indent) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index a57e1d698..ffe4eed07 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -133,11 +133,9 @@ Loop: } // BigInt returns significand as big.Int and exponent, bi * 10 ^ exp. -func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { +func (d Decimal128) BigInt() (*big.Int, int, error) { high, low := d.GetBytes() - var posSign bool // positive sign - - posSign = high>>63&1 == 0 + posSign := high>>63&1 == 0 // positive sign switch high >> 58 & (1<<5 - 1) { case 0x1F: @@ -149,6 +147,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { return nil, 0, ErrParseNegInf } + var exp int if high>>61&3 == 3 { // Bits: 1*sign 2*ignored 14*exponent 111*significand. // Implicit 0b100 prefix in significand. @@ -171,7 +170,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { return new(big.Int), 0, nil } - bi = big.NewInt(0) + bi := big.NewInt(0) const host32bit = ^uint(0)>>32 == 0 if host32bit { bi.SetBits([]big.Word{big.Word(low), big.Word(low >> 32), big.Word(high), big.Word(high >> 32)}) @@ -182,7 +181,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { if !posSign { return bi.Neg(bi), exp, nil } - return + return bi, exp, nil } // IsNaN returns whether d is NaN. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index a5a7d7c69..652898fea 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -10,8 +10,8 @@ package primitive import ( - "bytes" "crypto/rand" + "encoding" "encoding/binary" "encoding/hex" "encoding/json" @@ -34,6 +34,9 @@ var NilObjectID ObjectID var objectIDCounter = readRandomUint32() var processUnique = processUniqueBytes() +var _ encoding.TextMarshaler = ObjectID{} +var _ encoding.TextUnmarshaler = &ObjectID{} + // NewObjectID generates a new ObjectID. func NewObjectID() ObjectID { return NewObjectIDFromTimestamp(time.Now()) @@ -67,7 +70,7 @@ func (id ObjectID) String() string { // IsZero returns true if id is the empty ObjectID. func (id ObjectID) IsZero() bool { - return bytes.Equal(id[:], NilObjectID[:]) + return id == NilObjectID } // ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a @@ -83,7 +86,7 @@ func ObjectIDFromHex(s string) (ObjectID, error) { } var oid [12]byte - copy(oid[:], b[:]) + copy(oid[:], b) return oid, nil } @@ -94,6 +97,23 @@ func IsValidObjectID(s string) bool { return err == nil } +// MarshalText returns the ObjectID as UTF-8-encoded text. Implementing this allows us to use ObjectID +// as a map key when marshalling JSON. See https://pkg.go.dev/encoding#TextMarshaler +func (id ObjectID) MarshalText() ([]byte, error) { + return []byte(id.Hex()), nil +} + +// UnmarshalText populates the byte slice with the ObjectID. 
Implementing this allows us to use ObjectID +// as a map key when unmarshalling JSON. See https://pkg.go.dev/encoding#TextUnmarshaler +func (id *ObjectID) UnmarshalText(b []byte) error { + oid, err := ObjectIDFromHex(string(b)) + if err != nil { + return err + } + *id = oid + return nil +} + // MarshalJSON returns the ObjectID as a string func (id ObjectID) MarshalJSON() ([]byte, error) { return json.Marshal(id.Hex()) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index 51984e223..b3cba1bf9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -111,7 +111,7 @@ func (d DBPointer) String() string { // Equal compares d to d2 and returns true if they are equal. func (d DBPointer) Equal(d2 DBPointer) bool { - return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:]) + return d == d2 } // IsZero returns if d is the empty DBPointer. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index f397fa2d5..1cbe3884d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -14,6 +14,9 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" ) +var tRawValue = reflect.TypeOf(RawValue{}) +var tRaw = reflect.TypeOf(Raw(nil)) + var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types @@ -87,25 +90,3 @@ func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.Valu val.Set(reflect.ValueOf(rdr)) return err } - -func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error { - var copier bsonrw.Copier - elems, err := raw.Elements() - if err != nil { - return err - } - for _, elem := range elems { - dvw, err := dw.WriteDocumentElement(elem.Key()) - if err != nil { - return err - } - - val := elem.Value() - err = copier.CopyValueFromBytes(dvw, val.Type, val.Value) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index 2aae9f56a..efd705daa 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -15,7 +15,6 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -var errValidateDone = errors.New("validation loop complete") // Raw is a wrapper around a byte slice. It will interpret the slice as a // BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the @@ -84,9 +83,3 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { // String implements the fmt.Stringer interface. func (r Raw) String() string { return bsoncore.Document(r).String() } - -// readi32 is a helper function for reading an int32 from slice of bytes. 
-func readi32(b []byte) int32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 09062d208..16d7573e7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -13,7 +13,7 @@ import "go.mongodb.org/mongo-driver/bson/bsoncodec" var DefaultRegistry = NewRegistryBuilder().Build() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the +// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index bf91f691a..13a1c35cf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -7,11 +7,7 @@ package bson import ( - "reflect" - "time" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" ) // These constants uniquely refer to each BSON type. @@ -38,48 +34,3 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tBool = reflect.TypeOf(false) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tD = reflect.TypeOf(D{}) -var tA = reflect.TypeOf(A{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRawValue = reflect.TypeOf(RawValue{}) -var tFloat32 = reflect.TypeOf(float32(0)) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt = reflect.TypeOf(int(0)) -var tInt8 = reflect.TypeOf(int8(0)) -var tInt16 = reflect.TypeOf(int16(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tRaw = reflect.TypeOf(Raw(nil)) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tString = reflect.TypeOf("") -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTime = reflect.TypeOf(time.Time{}) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tUint = reflect.TypeOf(uint(0)) -var tUint8 = reflect.TypeOf(uint8(0)) -var tUint16 = reflect.TypeOf(uint16(0)) -var tUint32 = reflect.TypeOf(uint32(0)) -var tUint64 = reflect.TypeOf(uint64(0)) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tEmptySlice = reflect.TypeOf([]interface{}(nil)) - -var zeroVal reflect.Value - -// this references the quantity of milliseconds between zero time and -// the unix epoch. useful for making sure that we convert time.Time -// objects correctly to match the legacy bson library's handling of -// time.Time values. 
-const zeroEpochMs = int64(62135596800000) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index 6f9ca04d3..f936ba183 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -23,7 +23,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index bc802f0f9..ac05e401c 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -22,6 +22,9 @@ type CommandStartedEvent struct { CommandName string RequestID int64 ConnectionID string + // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return + // this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID *int32 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -33,6 +36,9 @@ type CommandFinishedEvent struct { CommandName string RequestID int64 ConnectionID string + // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return + // this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID *int32 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -64,19 +70,22 @@ const ( ReasonStale = "stale" ReasonConnectionErrored = "connectionError" ReasonTimedOut = "timeout" + ReasonError = "error" ) // strings for pool command monitoring types const ( - ConnectionClosed = "ConnectionClosed" PoolCreated = "ConnectionPoolCreated" + PoolReady = "ConnectionPoolReady" + PoolCleared = "ConnectionPoolCleared" + PoolClosedEvent = "ConnectionPoolClosed" ConnectionCreated = "ConnectionCreated" ConnectionReady = "ConnectionReady" + ConnectionClosed = "ConnectionClosed" + GetStarted = "ConnectionCheckOutStarted" GetFailed = "ConnectionCheckOutFailed" GetSucceeded = "ConnectionCheckedOut" ConnectionReturned = "ConnectionCheckedIn" - PoolCleared = "ConnectionPoolCleared" - PoolClosedEvent = "ConnectionPoolClosed" ) // MonitorPoolOptions contains pool options as formatted in pool events diff --git a/vendor/go.mongodb.org/mongo-driver/internal/const.go b/vendor/go.mongodb.org/mongo-driver/internal/const.go index 772053ad2..a7ef69d13 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/const.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/const.go @@ -9,7 +9,11 @@ package internal // import "go.mongodb.org/mongo-driver/internal" // Version is the current version of the driver. var Version = "local build" -// SetMockServiceID enables a mode in which the driver mocks server support for returning a "serviceId" field in "hello" -// command responses by using the value of "topologyVersion.processId". 
This is used for testing load balancer support -// until an upstream service can support running behind a load balancer. -var SetMockServiceID = false +// LegacyHello is the legacy version of the hello command. +var LegacyHello = "isMaster" + +// LegacyHelloLowercase is the lowercase, legacy version of the hello command. +var LegacyHelloLowercase = "ismaster" + +// LegacyNotPrimary is the legacy version of the "not primary" server error message. +var LegacyNotPrimary = "not master" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go new file mode 100644 index 000000000..d7b753b77 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go @@ -0,0 +1,77 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package randutil provides common random number utilities. +package randutil + +import ( + crand "crypto/rand" + "fmt" + "io" + "math/rand" + "sync" +) + +// A LockedRand wraps a "math/rand".Rand and is safe to use from multiple goroutines. +type LockedRand struct { + mu sync.Mutex + r *rand.Rand +} + +// NewLockedRand returns a new LockedRand that uses random values from src to generate other random +// values. It is safe to use from multiple goroutines. +func NewLockedRand(src rand.Source) *LockedRand { + return &LockedRand{ + // Ignore gosec warning "Use of weak random number generator (math/rand instead of + // crypto/rand)". We intentionally use a pseudo-random number generator. + /* #nosec G404 */ + r: rand.New(src), + } +} + +// Read generates len(p) random bytes and writes them into p. It always returns len(p) and a nil +// error. +func (lr *LockedRand) Read(p []byte) (int, error) { + lr.mu.Lock() + n, err := lr.r.Read(p) + lr.mu.Unlock() + return n, err +} + +// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). It +// panics if n <= 0. +func (lr *LockedRand) Intn(n int) int { + lr.mu.Lock() + x := lr.r.Intn(n) + lr.mu.Unlock() + return x +} + +// Shuffle pseudo-randomizes the order of elements. n is the number of elements. Shuffle panics if +// n < 0. swap swaps the elements with indexes i and j. +// +// Note that Shuffle locks the LockedRand, so shuffling large collections may adversely affect other +// concurrent calls. If many concurrent Shuffle and random value calls are required, consider using +// the global "math/rand".Shuffle instead because it uses much more granular locking. +func (lr *LockedRand) Shuffle(n int, swap func(i, j int)) { + lr.mu.Lock() + lr.r.Shuffle(n, swap) + lr.mu.Unlock() +} + +// CryptoSeed returns a random int64 read from the "crypto/rand" random number generator. It is +// intended to be used to seed pseudorandom number generators at package initialization. It panics +// if it encounters any errors. 
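The seeding idea behind CryptoSeed (read 8 bytes from crypto/rand and pack them into an int64) can be sketched independently of the driver's internal randutil package; cryptoSeed below is an illustrative stand-in, not the driver's code path:

package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"math/rand"
)

// cryptoSeed reads 8 bytes from the crypto/rand reader and assembles them
// into an int64 suitable for seeding a math/rand source.
func cryptoSeed() int64 {
	var b [8]byte
	if _, err := io.ReadFull(crand.Reader, b[:]); err != nil {
		panic(fmt.Errorf("failed to read 8 random bytes: %w", err))
	}
	return int64(binary.LittleEndian.Uint64(b[:]))
}

func main() {
	r := rand.New(rand.NewSource(cryptoSeed()))
	fmt.Println(r.Intn(100))
}

In the driver, the equivalent seed is paired with LockedRand so the resulting pseudo-random source can be shared safely across goroutines.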
+func CryptoSeed() int64 { + var b [8]byte + _, err := io.ReadFull(crand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err)) + } + + return (int64(b[0]) << 0) | (int64(b[1]) << 8) | (int64(b[2]) << 16) | (int64(b[3]) << 24) | + (int64(b[4]) << 32) | (int64(b[5]) << 40) | (int64(b[6]) << 48) | (int64(b[7]) << 56) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go b/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go deleted file mode 100644 index 792e531a5..000000000 --- a/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package internal - -import ( - "context" - "errors" -) - -// NewSemaphore creates a new semaphore. -func NewSemaphore(slots uint64) *Semaphore { - ch := make(chan struct{}, slots) - for i := uint64(0); i < slots; i++ { - ch <- struct{}{} - } - - return &Semaphore{ - permits: ch, - } -} - -// Semaphore is a synchronization primitive that controls access -// to a common resource. -type Semaphore struct { - permits chan struct{} -} - -// Len gets the number of permits available. -func (s *Semaphore) Len() uint64 { - return uint64(len(s.permits)) -} - -// Wait waits until a resource is available or until the context -// is done. -func (s *Semaphore) Wait(ctx context.Context) error { - select { - case <-s.permits: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Release releases a resource back into the pool. -func (s *Semaphore) Release() error { - select { - case s.permits <- struct{}{}: - default: - return errors.New("internal.Semaphore.Release: attempt to release more resources than are available") - } - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go index db1e1890e..6cafa791d 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go @@ -33,7 +33,7 @@ func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) { return nil, err } - var strs []string + strs := make([]string, 0, len(arrayValues)) for _, arrayVal := range arrayValues { str, ok := arrayVal.StringValueOK() if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go b/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go index 8d1fcdc75..21e73002a 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go @@ -15,4 +15,8 @@ var ( ErrLoadBalancedWithReplicaSet = errors.New("loadBalanced cannot be set to true if a replica set name is specified") // ErrLoadBalancedWithDirectConnection is returned when loadBalanced=true is specified in a URI with the directConnection option. ErrLoadBalancedWithDirectConnection = errors.New("loadBalanced cannot be set to true if the direct connection option is specified") + // ErrSRVMaxHostsWithReplicaSet is returned when srvMaxHosts > 0 is specified in a URI with the replicaSet option. 
+ ErrSRVMaxHostsWithReplicaSet = errors.New("srvMaxHosts cannot be a positive value if a replica set name is specified") + // ErrSRVMaxHostsWithLoadBalanced is returned when srvMaxHosts > 0 is specified in a URI with loadBalanced=true. + ErrSRVMaxHostsWithLoadBalanced = errors.New("srvMaxHosts cannot be a positive value if loadBalanced is set to true") ) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go index 56736a238..e748ced6a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go @@ -35,6 +35,7 @@ type bulkWrite struct { selector description.ServerSelector writeConcern *writeconcern.WriteConcern result BulkWriteResult + let interface{} } func (bw *bulkWrite) execute(ctx context.Context) error { @@ -59,11 +60,6 @@ func (bw *bulkWrite) execute(ctx context.Context) error { continue } - bypassDocValidation := bw.bypassDocumentValidation - if bypassDocValidation != nil && !*bypassDocValidation { - bypassDocValidation = nil - } - batchRes, batchErr, err := bw.runBatch(ctx, batch) bw.mergeResults(batchRes) @@ -118,7 +114,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.InsertedCount = int64(res.N) + batchRes.InsertedCount = res.N case *DeleteOneModel, *DeleteManyModel: res, err := bw.runDelete(ctx, batch) if err != nil { @@ -130,7 +126,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.DeletedCount = int64(res.N) + batchRes.DeletedCount = res.N case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel: res, err := bw.runUpdate(ctx, batch) if err != nil { @@ -142,8 +138,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.MatchedCount = int64(res.N) - batchRes.ModifiedCount = int64(res.NModified) + batchRes.MatchedCount = res.N + batchRes.ModifiedCount = res.NModified batchRes.UpsertedCount = int64(len(res.Upserted)) for _, upsert := range res.Upserted { batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID @@ -233,6 +229,13 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). ServerAPI(bw.collection.client.serverAPI) + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.DeleteResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -314,6 +317,13 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). 
ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI) + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.UpdateResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index c10c0396b..a76eb7c9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -38,7 +38,7 @@ var ( resumableErrorLabel = "ResumableChangeStreamError" errorCursorNotFound int32 = 43 // CursorNotFound error code - // Whitelist of error codes that are considered resumable. + // Allowlist of error codes that are considered resumable. resumableChangeStreamErrors = map[int32]struct{}{ 6: {}, // HostUnreachable 7: {}, // HostNotFound @@ -47,11 +47,11 @@ var ( 189: {}, // PrimarySteppedDown 262: {}, // ExceededTimeLimit 9001: {}, // SocketException - 10107: {}, // NotMaster + 10107: {}, // NotPrimary 11600: {}, // InterruptedAtShutdown 11602: {}, // InterruptedDueToReplStateChange - 13435: {}, // NotMasterNoSlaveOk - 13436: {}, // NotMasterOrSecondary + 13435: {}, // NotPrimaryNoSecondaryOK + 13436: {}, // NotPrimaryOrSecondary 63: {}, // StaleShardVersion 150: {}, // StaleEpoch 13388: {}, // StaleConfig @@ -69,21 +69,22 @@ type ChangeStream struct { // TryNext. If continued access is required, a copy must be made. Current bson.Raw - aggregate *operation.Aggregate - pipelineSlice []bsoncore.Document - cursor changeStreamCursor - cursorOptions driver.CursorOptions - batch []bsoncore.Document - resumeToken bson.Raw - err error - sess *session.Client - client *Client - registry *bsoncodec.Registry - streamType StreamType - options *options.ChangeStreamOptions - selector description.ServerSelector - operationTime *primitive.Timestamp - wireVersion *description.VersionRange + aggregate *operation.Aggregate + pipelineSlice []bsoncore.Document + pipelineOptions map[string]bsoncore.Value + cursor changeStreamCursor + cursorOptions driver.CursorOptions + batch []bsoncore.Document + resumeToken bson.Raw + err error + sess *session.Client + client *Client + registry *bsoncodec.Registry + streamType StreamType + options *options.ChangeStreamOptions + selector description.ServerSelector + operationTime *primitive.Timestamp + wireVersion *description.VersionRange } type changeStreamConfig struct { @@ -94,7 +95,7 @@ type changeStreamConfig struct { streamType StreamType collectionName string databaseName string - crypt *driver.Crypt + crypt driver.Crypt } func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline interface{}, @@ -141,7 +142,38 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in cs.cursorOptions.BatchSize = *cs.options.BatchSize } if cs.options.MaxAwaitTime != nil { - cs.cursorOptions.MaxTimeMS = int64(time.Duration(*cs.options.MaxAwaitTime) / time.Millisecond) + cs.cursorOptions.MaxTimeMS = int64(*cs.options.MaxAwaitTime / time.Millisecond) + } + if cs.options.Custom != nil { + // Marshal all custom options before passing to the initial aggregate. Return + // any errors from Marshaling. 
+ customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + cs.aggregate.CustomOptions(customOptions) + } + if cs.options.CustomPipeline != nil { + // Marshal all custom pipeline options before building pipeline slice. Return + // any errors from Marshaling. + cs.pipelineOptions = make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.CustomPipeline { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + cs.pipelineOptions[optionName] = optionValueBSON + } } switch cs.streamType { @@ -212,7 +244,7 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Deployment(cs.createOperationDeployment(server, conn)) if resuming { - cs.replaceOptions(ctx, cs.wireVersion) + cs.replaceOptions(cs.wireVersion) csOptDoc := cs.createPipelineOptionsDoc() pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil) @@ -390,6 +422,11 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I) } + // Append custom pipeline options. + for optionName, optionValue := range cs.pipelineOptions { + plDoc = bsoncore.AppendValueElement(plDoc, optionName, optionValue) + } + if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil { return nil } @@ -408,7 +445,7 @@ func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) { return pipelineArr, cs.err } -func (cs *ChangeStream) replaceOptions(ctx context.Context, wireVersion *description.VersionRange) { +func (cs *ChangeStream) replaceOptions(wireVersion *description.VersionRange) { // Cached resume token: use the resume token as the resumeAfter option and set no other resume options if cs.resumeToken != nil { cs.options.SetResumeAfter(cs.resumeToken) @@ -604,7 +641,7 @@ func (cs *ChangeStream) isResumableError() bool { return commandErr.HasErrorLabel(resumableErrorLabel) } - // For wire versions below 9, a server error is resumable if its code is on the whitelist. + // For wire versions below 9, a server error is resumable if its code is on the allowlist. 
_, resumable := resumableChangeStreamErrors[commandErr.Code] return resumable } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go index 5ff82c5f5..36c6e2547 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go @@ -8,6 +8,7 @@ package mongo import ( "context" + "time" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -35,6 +36,10 @@ func (c *changeStreamDeployment) Connection(context.Context) (driver.Connection, return c.conn, nil } +func (c *changeStreamDeployment) MinRTT() time.Duration { + return c.server.MinRTT() +} + func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult { ep, ok := c.server.(driver.ErrorProcessor) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 4f4292079..ddc08bd5f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -25,7 +25,6 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/auth" - "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -52,7 +51,6 @@ type Client struct { id uuid.UUID topologyOptions []topology.Option deployment driver.Deployment - connString connstring.ConnString localThreshold time.Duration retryWrites bool retryReads bool @@ -61,7 +59,6 @@ type Client struct { readConcern *readconcern.ReadConcern writeConcern *writeconcern.WriteConcern registry *bsoncodec.Registry - marshaller BSONAppender monitor *event.CommandMonitor serverAPI *driver.ServerAPIOptions serverMonitor *event.ServerMonitor @@ -71,7 +68,7 @@ type Client struct { keyVaultClientFLE *Client keyVaultCollFLE *Collection mongocryptdFLE *mcryptClient - cryptFLE *driver.Crypt + cryptFLE driver.Crypt metadataClientFLE *Client internalClientFLE *Client } @@ -375,10 +372,22 @@ func (c *Client) configure(opts *options.ClientOptions) error { // ClusterClock c.clock = new(session.ClusterClock) - // Pass down URI so topology can determine whether or not SRV polling is required - topologyOpts = append(topologyOpts, topology.WithURI(func(uri string) string { - return opts.GetURI() - })) + // Pass down URI, SRV service name, and SRV max hosts so topology can poll SRV records correctly. + topologyOpts = append(topologyOpts, + topology.WithURI(func(uri string) string { return opts.GetURI() }), + topology.WithSRVServiceName(func(srvName string) string { + if opts.SRVServiceName != nil { + return *opts.SRVServiceName + } + return "" + }), + topology.WithSRVMaxHosts(func(srvMaxHosts int) int { + if opts.SRVMaxHosts != nil { + return *opts.SRVMaxHosts + } + return 0 + }), + ) // AppName var appName string @@ -425,7 +434,7 @@ func (c *Client) configure(opts *options.ClientOptions) error { // Handshaker var handshaker = func(driver.Handshaker) driver.Handshaker { - return operation.NewIsMaster().AppName(appName).Compressors(comps).ClusterClock(c.clock). + return operation.NewHello().AppName(appName).Compressors(comps).ClusterClock(c.clock). 
ServerAPI(c.serverAPI).LoadBalanced(loadBalanced) } // Auth & Database & Password & Username @@ -537,6 +546,13 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithMinConnections(func(uint64) uint64 { return *opts.MinPoolSize }), ) } + // MaxConnecting + if opts.MaxConnecting != nil { + serverOpts = append( + serverOpts, + topology.WithMaxConnecting(func(uint64) uint64 { return *opts.MaxConnecting }), + ) + } // PoolMonitor if opts.PoolMonitor != nil { serverOpts = append( @@ -625,6 +641,8 @@ func (c *Client) configure(opts *options.ClientOptions) error { if err := c.configureAutoEncryption(opts); err != nil { return err } + } else { + c.cryptFLE = opts.Crypt } // OCSP cache @@ -669,9 +687,9 @@ func (c *Client) configure(opts *options.ClientOptions) error { // Deployment if opts.Deployment != nil { - // topology options: WithSeedlist and WithURI + // topology options: WithSeedlist, WithURI, WithSRVServiceName and WithSRVMaxHosts // server options: WithClock and WithConnectionOptions - if len(serverOpts) > 2 || len(topologyOpts) > 2 { + if len(serverOpts) > 2 || len(topologyOpts) > 4 { return errors.New("cannot specify topology or server options with a deployment") } c.deployment = opts.Deployment @@ -779,11 +797,13 @@ func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { if !bypass { cir = collInfoRetriever{client: c.metadataClientFLE} } + cryptOpts := &driver.CryptOptions{ CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, KmsProviders: kmsProviders, + TLSConfig: opts.TLSConfig, BypassAutoEncryption: bypass, SchemaMap: cryptSchemaMap, } @@ -823,7 +843,7 @@ func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Databa // databases are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include // all databases. // -// The opts paramter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). +// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). // // For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) { @@ -834,6 +854,9 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... sess := sessionFromContext(ctx) err := c.validSession(sess) + if err != nil { + return ListDatabasesResult{}, err + } if sess == nil && c.sessionPool != nil { sess, err = session.NewClientSession(c.sessionPool, c.id, session.Implicit) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index 4b1f12d39..fe4646b64 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -22,7 +22,7 @@ import ( // ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values. 
type ClientEncryption struct { - crypt *driver.Crypt + crypt driver.Crypt keyVaultClient *Client keyVaultColl *Collection } @@ -54,6 +54,7 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti KeyFn: kr.cryptKeys, CollInfoFn: cir.cryptCollInfo, KmsProviders: kmsProviders, + TLSConfig: ceo.TLSConfig, }) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 02e197d5a..590d92804 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -225,6 +225,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, collection: coll, selector: selector, writeConcern: wc, + let: bwo.Let, } err = op.execute(ctx) @@ -454,6 +455,13 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn if do.Hint != nil { op = op.Hint(true) } + if do.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, do.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } // deleteMany cannot be retried retryMode := driver.RetryNone @@ -465,7 +473,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn if rr&expectedRr == 0 { return nil, err } - return &DeleteResult{DeletedCount: int64(op.Result().N)}, err + return &DeleteResult{DeletedCount: op.Result().N}, err } // DeleteOne executes a delete command to delete at most one document from the collection. @@ -548,6 +556,13 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI) + if uo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation { op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) @@ -567,8 +582,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc opRes := op.Result() res := &UpdateResult{ - MatchedCount: int64(opRes.N), - ModifiedCount: int64(opRes.NModified), + MatchedCount: opRes.N, + ModifiedCount: opRes.NModified, UpsertedCount: int64(len(opRes.Upserted)), } if len(opRes.Upserted) > 0 { @@ -693,11 +708,15 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, updateOptions := make([]*options.UpdateOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } uOpts := options.Update() uOpts.BypassDocumentValidation = opt.BypassDocumentValidation uOpts.Collation = opt.Collation uOpts.Upsert = opt.Upsert uOpts.Hint = opt.Hint + uOpts.Let = opt.Let updateOptions = append(updateOptions, uOpts) } @@ -736,8 +755,7 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, } // aggreate is the helper method for Aggregate -func aggregate(a aggregateParams) (*Cursor, error) { - +func aggregate(a aggregateParams) (cur *Cursor, err error) { if a.ctx == nil { a.ctx = context.Background() } @@ -748,6 +766,12 @@ func aggregate(a aggregateParams) (*Cursor, error) { } sess := sessionFromContext(a.ctx) + // Always close any created implicit sessions if aggregate returns an error. 
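The deferred cleanup introduced in aggregate relies on its new named return values (cur *Cursor, err error), so a single deferred closure can observe whether any later return path failed. The same pattern in isolation, with hypothetical openSession and closeSession stand-ins for the driver's implicit session handling:

package main

import (
	"errors"
	"fmt"
)

func openSession() string   { return "session" }
func closeSession(s string) { fmt.Println("closed", s) }

// doWork uses a named error return so one deferred closure can release the
// session on every error path, instead of repeating cleanup before each
// return statement.
func doWork(fail bool) (err error) {
	sess := openSession()
	defer func() {
		if err != nil {
			closeSession(sess)
		}
	}()

	if fail {
		return errors.New("boom")
	}
	return nil
}

func main() {
	fmt.Println(doWork(true))  // cleanup runs, then the error is printed
	fmt.Println(doWork(false)) // no cleanup needed
}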
+ defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && a.client.sessionPool != nil { sess, err = session.NewClientSession(a.client.sessionPool, a.client.id, session.Implicit) if err != nil { @@ -772,9 +796,9 @@ func aggregate(a aggregateParams) (*Cursor, error) { sess = nil } - selector := makePinnedSelector(sess, a.writeSelector) - if !hasOutputStage { - selector = makeReadPrefSelector(sess, a.readSelector, a.client.localThreshold) + selector := makeReadPrefSelector(sess, a.readSelector, a.client.localThreshold) + if hasOutputStage { + selector = makeOutputAggregateSelector(sess, a.readPreference, a.client.localThreshold) } ao := options.MergeAggregateOptions(a.opts...) @@ -784,6 +808,7 @@ func aggregate(a aggregateParams) (*Cursor, error) { Session(sess). WriteConcern(wc). ReadConcern(rc). + ReadPreference(a.readPreference). CommandMonitor(a.client.monitor). ServerSelector(selector). ClusterClock(a.client.clock). @@ -791,13 +816,8 @@ func aggregate(a aggregateParams) (*Cursor, error) { Collection(a.col). Deployment(a.client.deployment). Crypt(a.client.cryptFLE). - ServerAPI(a.client.serverAPI) - if !hasOutputStage { - // Only pass the user-specified read preference if the aggregation doesn't have a $out or $merge stage. - // Otherwise, the read preference could be forwarded to a mongos, which would error if the aggregation were - // executed against a non-primary node. - op.ReadPreference(a.readPreference) - } + ServerAPI(a.client.serverAPI). + HasOutputStage(hasOutputStage) if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) @@ -825,7 +845,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Hint != nil { hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hintVal) @@ -833,11 +852,24 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Let != nil { let, err := transformBsoncoreDocument(a.registry, ao.Let, true, "let") if err != nil { - closeImplicitSession(sess) return nil, err } op.Let(let) } + if ao.Custom != nil { + // Marshal all custom options before passing to the aggregate operation. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range ao.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(a.registry, optionValue) + if err != nil { + return nil, err + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + op.CustomOptions(customOptions) + } retry := driver.RetryNone if a.retryRead && !hasOutputStage { @@ -847,7 +879,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { err = op.Execute(a.ctx) if err != nil { - closeImplicitSession(sess) if wce, ok := err.(driver.WriteCommandError); ok && wce.WriteConcernError != nil { return nil, *convertDriverWriteConcernError(wce.WriteConcernError) } @@ -856,7 +887,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } cursor, err := newCursorWithSession(bc, a.registry, sess) @@ -1049,7 +1079,7 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) option := options.MergeDistinctOptions(opts...) - op := operation.NewDistinct(fieldName, bsoncore.Document(f)). 
+ op := operation.NewDistinct(fieldName, f). Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). @@ -1104,7 +1134,7 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // // For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. func (coll *Collection) Find(ctx context.Context, filter interface{}, - opts ...*options.FindOptions) (*Cursor, error) { + opts ...*options.FindOptions) (cur *Cursor, err error) { if ctx == nil { ctx = context.Background() @@ -1116,6 +1146,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } sess := sessionFromContext(ctx) + // Always close any created implicit sessions if Find returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && coll.client.sessionPool != nil { var err error sess, err = session.NewClientSession(coll.client.sessionPool, coll.client.id, session.Implicit) @@ -1126,7 +1162,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, err = coll.client.validSession(sess) if err != nil { - closeImplicitSession(sess) return nil, err } @@ -1173,11 +1208,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Hint != nil { hint, err := transformValue(coll.registry, fo.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return nil, err + } + op.Let(let) + } if fo.Limit != nil { limit := *fo.Limit if limit < 0 { @@ -1190,7 +1231,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Max != nil { max, err := transformBsoncoreDocument(coll.registry, fo.Max, true, "max") if err != nil { - closeImplicitSession(sess) return nil, err } op.Max(max) @@ -1204,7 +1244,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Min != nil { min, err := transformBsoncoreDocument(coll.registry, fo.Min, true, "min") if err != nil { - closeImplicitSession(sess) return nil, err } op.Min(min) @@ -1218,7 +1257,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") if err != nil { - closeImplicitSession(sess) return nil, err } op.Projection(proj) @@ -1238,7 +1276,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Sort != nil { sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") if err != nil { - closeImplicitSession(sess) return nil, err } op.Sort(sort) @@ -1250,13 +1287,11 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op = op.Retry(retry) if err = op.Execute(ctx); err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } return newCursorWithSession(bc, coll.registry, sess) @@ -1278,9 +1313,12 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, ctx = context.Background() } - findOpts := make([]*options.FindOptions, len(opts)) - for i, opt := range opts { - findOpts[i] = &options.FindOptions{ + findOpts := make([]*options.FindOptions, 
0, len(opts)) + for _, opt := range opts { + if opt == nil { + continue + } + findOpts = append(findOpts, &options.FindOptions{ AllowPartialResults: opt.AllowPartialResults, BatchSize: opt.BatchSize, Collation: opt.Collation, @@ -1299,7 +1337,7 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, Skip: opt.Skip, Snapshot: opt.Snapshot, Sort: opt.Sort, - } + }) } // Unconditionally send a limit to make sure only one document is returned and the cursor is not kept open // by the server. @@ -1410,6 +1448,13 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fod.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fod.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1482,6 +1527,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1565,6 +1617,13 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1680,3 +1739,16 @@ func makeReadPrefSelector(sess *session.Client, selector description.ServerSelec return makePinnedSelector(sess, selector) } + +func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, localThreshold time.Duration) description.ServerSelectorFunc { + if sess != nil && sess.TransactionRunning() { + // Use current transaction's read preference if available + rp = sess.CurrentRp + } + + selector := description.CompositeSelector([]description.ServerSelector{ + description.OutputAggregateSelector(rp), + description.LatencySelector(localThreshold), + }) + return makePinnedSelector(sess, selector) +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go index e29523e48..5e96da731 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go @@ -18,6 +18,9 @@ type keyRetriever struct { } func (kr *keyRetriever) cryptKeys(ctx context.Context, filter bsoncore.Document) ([]bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. + // The explicit session may be from a different client. + ctx = NewSessionContext(ctx, nil) cursor, err := kr.coll.Find(ctx, filter) if err != nil { return nil, EncryptionKeyVaultError{Wrapped: err} @@ -43,6 +46,9 @@ type collInfoRetriever struct { } func (cir *collInfoRetriever) cryptCollInfo(ctx context.Context, db string, filter bsoncore.Document) (bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. + // The explicit session may be from a different client. 
+ ctx = NewSessionContext(ctx, nil) cursor, err := cir.client.Database(db).ListCollections(ctx, filter) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index 3ec03baf4..533cfce07 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -15,6 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -67,6 +68,47 @@ func newEmptyCursor() *Cursor { return &Cursor{bc: driver.NewEmptyBatchCursor()} } +// NewCursorFromDocuments creates a new Cursor pre-loaded with the provided documents, error and registry. If no registry is provided, +// bson.DefaultRegistry will be used. +// +// The documents parameter must be a slice of documents. The slice may be nil or empty, but all elements must be non-nil. +func NewCursorFromDocuments(documents []interface{}, err error, registry *bsoncodec.Registry) (*Cursor, error) { + if registry == nil { + registry = bson.DefaultRegistry + } + + // Convert documents slice to a sequence-style byte array. + var docsBytes []byte + for _, doc := range documents { + switch t := doc.(type) { + case nil: + return nil, ErrNilDocument + case bsonx.Doc: + doc = t.Copy() + case []byte: + // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. + doc = bson.Raw(t) + } + var marshalErr error + docsBytes, marshalErr = bson.MarshalAppendWithRegistry(registry, docsBytes, doc) + if marshalErr != nil { + return nil, marshalErr + } + } + + c := &Cursor{ + bc: driver.NewBatchCursorFromDocuments(docsBytes), + registry: registry, + err: err, + } + + // Initialize batch and batchLength here. The underlying batch cursor will be preloaded with the + // provided contents, and thus already has a batch before calls to Next/TryNext. + c.batch = c.bc.Batch() + c.batchLength = c.bc.Batch().DocumentCount() + return c, nil +} + // ID returns the ID of this cursor, or 0 if the cursor has been closed or exhausted. func (c *Cursor) ID() int64 { return c.bc.ID() } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index 207873344..b0066f04d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -303,6 +303,9 @@ func (db *Database) Drop(ctx context.Context) error { // documentation). // // For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running +// against MongoDB version 2.6. func (db *Database) ListCollectionSpecifications(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]*CollectionSpecification, error) { @@ -337,6 +340,9 @@ func (db *Database) ListCollectionSpecifications(ctx context.Context, filter int // documentation). // // For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. 
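For context on the new constructor in the cursor.go hunk above: mongo.NewCursorFromDocuments builds a Cursor that is pre-loaded with in-memory documents, which is mainly useful for tests and mocks. A minimal sketch under the assumption of this vendored driver version; the document contents are made up:

package main

import (
	"context"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

func main() {
	// Pre-load a cursor with two in-memory documents. Passing a nil registry
	// falls back to bson.DefaultRegistry; passing a nil error attaches none.
	docs := []interface{}{
		bson.D{{Key: "name", Value: "Alice"}},
		bson.D{{Key: "name", Value: "Bob"}},
	}
	cur, err := mongo.NewCursorFromDocuments(docs, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer cur.Close(context.Background())

	// Iterate exactly as with a server-backed cursor.
	for cur.Next(context.Background()) {
		var doc bson.D
		if err := cur.Decode(&doc); err != nil {
			log.Fatal(err)
		}
		fmt.Println(doc)
	}
	if err := cur.Err(); err != nil {
		log.Fatal(err)
	}
}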
func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -382,6 +388,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt cursorOpts.BatchSize = *lco.BatchSize op = op.BatchSize(*lco.BatchSize) } + if lco.AuthorizedCollections != nil { + op = op.AuthorizedCollections(*lco.AuthorizedCollections) + } retry := driver.RetryNone if db.client.retryReads { @@ -415,6 +424,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt // documentation). // // For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) { opts = append(opts, options.ListCollections().SetNameOnly(true)) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go index 7de7df5e6..405efe94b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go @@ -25,7 +25,7 @@ type SelectedServer struct { Kind TopologyKind } -// Server contains information about a node in a cluster. This is created from isMaster command responses. If the value +// Server contains information about a node in a cluster. This is created from hello command responses. If the value // of the Kind field is LoadBalancer, only the Addr and Kind fields will be set. All other fields will be set to the // zero value of the field's type. type Server struct { @@ -38,6 +38,7 @@ type Server struct { CanonicalAddr address.Address ElectionID primitive.ObjectID HeartbeatInterval time.Duration + HelloOK bool Hosts []string LastError error LastUpdateTime time.Time @@ -47,6 +48,7 @@ type Server struct { MaxMessageSize uint32 Members []address.Address Passives []string + Passive bool Primary address.Address ReadOnly bool ServiceID *primitive.ObjectID // Only set for servers that are deployed behind a load balancer. @@ -59,7 +61,7 @@ type Server struct { WireVersion *VersionRange } -// NewServer creates a new server description from the given isMaster command response. +// NewServer creates a new server description from the given hello command response. 
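The helloOk and passive fields added to description.Server above are populated by description.NewServer when they appear in a hello response. A rough sketch of feeding a fabricated response through the parser; the import paths and field values are assumptions for illustration only:

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/address"
	"go.mongodb.org/mongo-driver/mongo/description"
)

func main() {
	// Fabricated hello-style response; real responses carry many more fields.
	raw, err := bson.Marshal(bson.D{
		{Key: "helloOk", Value: true},
		{Key: "isWritablePrimary", Value: false},
		{Key: "secondary", Value: true},
		{Key: "passive", Value: true},
	})
	if err != nil {
		log.Fatal(err)
	}

	desc := description.NewServer(address.Address("localhost:27017"), bson.Raw(raw))
	fmt.Println(desc.HelloOK, desc.Passive, desc.LastError)
}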
func NewServer(addr address.Address, response bson.Raw) Server { desc := Server{Addr: addr, CanonicalAddr: addr, LastUpdateTime: time.Now().UTC()} elements, err := response.Elements() @@ -99,6 +101,12 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'electionId' to be a objectID but it's a BSON %s", element.Value().Type) return desc } + case "helloOk": + desc.HelloOK, ok = element.Value().BooleanOK() + if !ok { + desc.LastError = fmt.Errorf("expected 'helloOk' to be a boolean but it's a BSON %s", element.Value().Type) + return desc + } case "hidden": hidden, ok = element.Value().BooleanOK() if !ok { @@ -118,10 +126,10 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'isWritablePrimary' to be a boolean but it's a BSON %s", element.Value().Type) return desc } - case "ismaster": + case internal.LegacyHelloLowercase: isWritablePrimary, ok = element.Value().BooleanOK() if !ok { - desc.LastError = fmt.Errorf("expected 'ismaster' to be a boolean but it's a BSON %s", element.Value().Type) + desc.LastError = fmt.Errorf("expected legacy hello to be a boolean but it's a BSON %s", element.Value().Type) return desc } case "isreplicaset": @@ -215,6 +223,12 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = err return desc } + case "passive": + desc.Passive, ok = element.Value().BooleanOK() + if !ok { + desc.LastError = fmt.Errorf("expected 'passive' to be a boolean but it's a BSON %s", element.Value().Type) + return desc + } case "primary": primary, ok := element.Value().StringValueOK() if !ok { @@ -272,10 +286,6 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = err return desc } - - if internal.SetMockServiceID { - desc.ServiceID = &desc.TopologyVersion.ProcessID - } } } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index f0c728e4b..8e810cb9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -130,10 +130,20 @@ func WriteSelector() ServerSelector { // ReadPrefSelector selects servers based on the provided read preference. func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector { + return readPrefSelector(rp, false) +} + +// OutputAggregateSelector selects servers based on the provided read preference given that the underlying operation is +// aggregate with an output stage. +func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector { + return readPrefSelector(rp, true) +} + +func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelector { return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { if t.Kind == LoadBalanced { // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check - // this before checking MaxStaleness support becuase there's no monitoring in this mode, so the candidate + // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate // server wouldn't have a wire version set, which would result in an error. 
return candidates, nil } @@ -152,7 +162,7 @@ func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector { case Single: return candidates, nil case ReplicaSetNoPrimary, ReplicaSetWithPrimary: - return selectForReplicaSet(rp, t, candidates) + return selectForReplicaSet(rp, isOutputAggregate, t, candidates) case Sharded: return selectByKind(candidates, Mongos), nil } @@ -170,11 +180,21 @@ func maxStalenessSupported(wireVersion *VersionRange) error { return nil } -func selectForReplicaSet(rp *readpref.ReadPref, t Topology, candidates []Server) ([]Server, error) { +func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) { if err := verifyMaxStaleness(rp, t); err != nil { return nil, err } + // If underlying operation is an aggregate with an output stage, only apply read preference + // if all candidates are 5.0+. Otherwise, operate under primary read preference. + if isOutputAggregate { + for _, s := range candidates { + if s.WireVersion.Max < 13 { + return selectByKind(candidates, RSPrimary), nil + } + } + } + switch rp.Mode() { case readpref.PrimaryMode: return selectByKind(candidates, RSPrimary), nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index d184a575a..669aa14c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -105,8 +105,8 @@ // // Note: Auto encryption is an enterprise-only feature. // -// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.1.0 or higher is -// required when using driver version 1.5.0 or higher. To install libmongocrypt, follow the instructions for your +// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.3.0 or higher is +// required when using driver version 1.8.0 or higher. To install libmongocrypt, follow the instructions for your // operating system: // // 1. Linux: follow the instructions listed at diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 2c3ae1579..a16efab06 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -56,6 +56,7 @@ func replaceErrors(err error) error { Labels: de.Labels, Name: de.Name, Wrapped: de.Wrapped, + Raw: bson.Raw(de.Raw), } } if qe, ok := err.(driver.QueryFailureError); ok { @@ -63,6 +64,7 @@ func replaceErrors(err error) error { ce := CommandError{ Name: qe.Message, Wrapped: qe.Wrapped, + Raw: bson.Raw(qe.Response), } dollarErr, err := qe.Response.LookupErr("$err") @@ -207,6 +209,7 @@ type ServerError interface { } var _ ServerError = CommandError{} +var _ ServerError = WriteError{} var _ ServerError = WriteException{} var _ ServerError = BulkWriteException{} @@ -217,6 +220,7 @@ type CommandError struct { Labels []string // Categories to which the error belongs Name string // A human-readable name corresponding to the error code Wrapped error // The underlying error, if one exists. + Raw bson.Raw // The original server response containing the error. } // Error implements the error interface. @@ -276,6 +280,9 @@ type WriteError struct { Code int Message string Details bson.Raw + + // The original write error from the server response. 
+ Raw bson.Raw } func (we WriteError) Error() string { @@ -286,6 +293,30 @@ func (we WriteError) Error() string { return msg } +// HasErrorCode returns true if the error has the specified code. +func (we WriteError) HasErrorCode(code int) bool { + return we.Code == code +} + +// HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels, +// so we always return false. +func (we WriteError) HasErrorLabel(label string) bool { + return false +} + +// HasErrorMessage returns true if the error contains the specified message. +func (we WriteError) HasErrorMessage(message string) bool { + return strings.Contains(we.Message, message) +} + +// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message. +func (we WriteError) HasErrorCodeWithMessage(code int, message string) bool { + return we.Code == code && strings.Contains(we.Message, message) +} + +// serverError implements the ServerError interface. +func (we WriteError) serverError() {} + // WriteErrors is a group of write errors that occurred during execution of a write operation. type WriteErrors []WriteError @@ -307,6 +338,7 @@ func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors { Code: int(err.Code), Message: err.Message, Details: bson.Raw(err.Details), + Raw: bson.Raw(err.Raw), }) } return wes @@ -319,6 +351,7 @@ type WriteConcernError struct { Code int Message string Details bson.Raw + Raw bson.Raw // The original write concern error from the server response. } // Error implements the error interface. @@ -340,6 +373,9 @@ type WriteException struct { // The categories to which the exception belongs. Labels []string + + // The original server response containing the error. + Raw bson.Raw } // Error implements the error interface. @@ -426,6 +462,7 @@ func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcern Code: int(wce.Code), Message: wce.Message, Details: bson.Raw(wce.Details), + Raw: bson.Raw(wce.Raw), } } @@ -559,6 +596,7 @@ func processWriteError(err error) (returnResult, error) { WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError), WriteErrors: writeErrorsFromDriverWriteErrors(tt.WriteErrors), Labels: tt.Labels, + Raw: bson.Raw(tt.Raw), } default: return rrNone, replaceErrors(err) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index 18fc52443..e8e260f16 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -272,7 +272,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. 
err = op.Execute(ctx) if err != nil { - return nil, replaceErrors(err) + _, err = processWriteError(err) + return nil, err } return names, nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 3ceb7aa97..da29175c1 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -161,21 +161,6 @@ func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, ma return b, nil } -func ensureID(d bsonx.Doc) (bsonx.Doc, interface{}) { - var id interface{} - - elem, err := d.LookupElementErr("_id") - switch err.(type) { - case nil: - id = elem - default: - oid := primitive.NewObjectID() - d = append(d, bsonx.Elem{"_id", bsonx.ObjectID(oid)}) - id = oid - } - return d, id -} - func ensureDollarKey(doc bsoncore.Document) error { firstElem, err := doc.IndexErr(0) if err != nil { @@ -190,7 +175,7 @@ func ensureDollarKey(doc bsoncore.Document) error { func ensureNoDollarKey(doc bsoncore.Document) error { if elem, err := doc.IndexErr(0); err == nil && strings.HasPrefix(elem.Key(), "$") { - return errors.New("replacement document cannot contains keys beginning with '$") + return errors.New("replacement document cannot contain keys beginning with '$'") } return nil @@ -225,20 +210,43 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface return nil, false, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind()) } - aidx, arr := bsoncore.AppendArrayStart(nil) var hasOutputStage bool valLen := val.Len() + switch t := pipeline.(type) { // Explicitly forbid non-empty pipelines that are semantically single documents // and are implemented as slices. - switch t := pipeline.(type) { case bson.D, bson.Raw, bsoncore.Document: if valLen > 0 { return nil, false, fmt.Errorf("%T is not an allowed pipeline type as it represents a single document. Use bson.A or mongo.Pipeline instead", t) } + // bsoncore.Arrays do not need to be transformed. Only check validity and presence of output stage. + case bsoncore.Array: + if err := t.Validate(); err != nil { + return nil, false, err + } + + values, err := t.Values() + if err != nil { + return nil, false, err + } + + numVals := len(values) + if numVals == 0 { + return bsoncore.Document(t), false, nil + } + + // If not empty, check if first value of the last stage is $out or $merge. + if lastStage, ok := values[numVals-1].DocumentOK(); ok { + if elem, err := lastStage.IndexErr(0); err == nil && (elem.Key() == "$out" || elem.Key() == "$merge") { + hasOutputStage = true + } + } + return bsoncore.Document(t), hasOutputStage, nil } + aidx, arr := bsoncore.AppendArrayStart(nil) for idx := 0; idx < valLen; idx++ { doc, err := transformBsoncoreDocument(registry, val.Index(idx).Interface(), true, fmt.Sprintf("pipeline stage :%v", idx)) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index 2eab62f97..c36b1d31c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -77,6 +77,10 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) // markCommand executes the given command on mongocryptd. func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. 
+ // The explicit session will be from a different client. + // If an explicit session is set, it is applied after automatic encryption. + ctx = NewSessionContext(ctx, nil) db := mc.client.Database(dbName, databaseOpts) res, err := db.RunCommand(ctx, cmd).DecodeBytes() @@ -111,6 +115,8 @@ func (mc *mcryptClient) disconnect(ctx context.Context) error { } func (mc *mcryptClient) spawnProcess() error { + // Ignore gosec warning about subprocess launched with externally-provided path variable. + /* #nosec G204 */ cmd := exec.Command(mc.path, mc.spawnArgs...) cmd.Stdout = nil cmd.Stderr = nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index e1f710fd2..cf0da5fcb 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -6,7 +6,11 @@ package options -import "time" +import ( + "time" + + "go.mongodb.org/mongo-driver/bson" +) // AggregateOptions represents options that can be used to configure an Aggregate operation. type AggregateOptions struct { @@ -50,6 +54,11 @@ type AggregateOptions struct { // Values must be constant or closed expressions that do not reference document fields. Parameters can then be // accessed as variables in an aggregate expression context (e.g. "$$var"). Let interface{} + + // Custom options to be added to aggregate expression. Key-value pairs of the BSON map should correlate with desired + // option names and values. Values must be Marshalable. Custom options may conflict with non-custom options, and custom + // options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M } // Aggregate creates a new AggregateOptions instance. @@ -111,6 +120,15 @@ func (ao *AggregateOptions) SetLet(let interface{}) *AggregateOptions { return ao } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions { + ao.Custom = c + return ao +} + // MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins // fashion. func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { @@ -146,6 +164,9 @@ func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { if ao.Let != nil { aggOpts.Let = ao.Let } + if ao.Custom != nil { + aggOpts.Custom = ao.Custom + } } return aggOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index 517b69e6c..89c3c05f1 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -6,6 +6,10 @@ package options +import ( + "crypto/tls" +) + // AutoEncryptionOptions represents options used to configure auto encryption/decryption behavior for a mongo.Client // instance. 
// @@ -27,6 +31,7 @@ type AutoEncryptionOptions struct { SchemaMap map[string]interface{} BypassAutoEncryption *bool ExtraOptions map[string]interface{} + TLSConfig map[string]*tls.Config } // AutoEncryption creates a new AutoEncryptionOptions configured with default values. @@ -91,6 +96,23 @@ func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{} return a } +// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. +// +// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. +func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *AutoEncryptionOptions { + tlsConfigs := make(map[string]*tls.Config) + for provider, config := range tlsOpts { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + if config.MinVersion == 0 { + config.MinVersion = tls.VersionTLS12 + } + tlsConfigs[provider] = config + } + a.TLSConfig = tlsConfigs + return a +} + // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() @@ -117,6 +139,9 @@ func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionO if opt.ExtraOptions != nil { aeo.ExtraOptions = opt.ExtraOptions } + if opt.TLSConfig != nil { + aeo.TLSConfig = opt.TLSConfig + } } return aeo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 57f98f83d..2786ab2c5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -19,6 +19,12 @@ type BulkWriteOptions struct { // If true, no writes will be executed after one fails. The default value is true. Ordered *bool + + // Specifies parameters for all update and delete commands in the BulkWrite. This option is only valid for MongoDB + // versions >= 5.0. Older servers will report an error for using this option. This must be a document mapping + // parameter names to values. Values must be constant or closed expressions that do not reference document fields. + // Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // BulkWrite creates a new *BulkWriteOptions instance. @@ -40,6 +46,15 @@ func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOp return b } +// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the BulkWrite. +// This option is only valid for MongoDB versions >= 5.0. Older servers will report an error for using this option. +// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not +// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). +func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions { + b.Let = &let + return b +} + // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins // fashion. 
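BulkWriteOptions.Let, added above, forwards a let document to every update and delete in the batch on MongoDB 5.0+. A hedged sketch; the collection name, filter, and the targetQty variable are invented for illustration:

package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("test").Collection("inventory")

	// $$targetQty is resolved from the let document by a 5.0+ server.
	models := []mongo.WriteModel{
		mongo.NewUpdateManyModel().
			SetFilter(bson.D{{Key: "$expr", Value: bson.D{
				{Key: "$lt", Value: bson.A{"$qty", "$$targetQty"}},
			}}}).
			SetUpdate(bson.D{{Key: "$set", Value: bson.D{{Key: "restock", Value: true}}}}),
	}
	bwOpts := options.BulkWrite().SetLet(bson.D{{Key: "targetQty", Value: 10}})
	if _, err := coll.BulkWrite(ctx, models, bwOpts); err != nil {
		log.Fatal(err)
	}
}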
func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { @@ -54,6 +69,9 @@ func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { if opt.BypassDocumentValidation != nil { b.BypassDocumentValidation = opt.BypassDocumentValidation } + if opt.Let != nil { + b.Let = opt.Let + } } return b diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go index fe19f45eb..eb9b0643c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go @@ -9,6 +9,7 @@ package options import ( "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -46,6 +47,16 @@ type ChangeStreamOptions struct { // corresponding to an oplog entry immediately after the specified token will be returned. If this is specified, // ResumeAfter and StartAtOperationTime must not be set. This option is only valid for MongoDB versions >= 4.1.1. StartAfter interface{} + + // Custom options to be added to the initial aggregate for the change stream. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. Custom options may conflict with + // non-custom options, and custom options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M + + // Custom options to be added to the $changeStream stage in the initial aggregate. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. Custom pipeline options bypass client-side + // validation. Prefer using non-custom options where possible. + CustomPipeline bson.M } // ChangeStream creates a new ChangeStreamOptions instance. @@ -97,6 +108,23 @@ func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptio return cso } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (cso *ChangeStreamOptions) SetCustom(c bson.M) *ChangeStreamOptions { + cso.Custom = c + return cso +} + +// SetCustomPipeline sets the value for the CustomPipeline field. Key-value pairs of the BSON map +// should correlate with desired option names and values. Values must be Marshalable. Custom pipeline +// options bypass client-side validation. Prefer using non-custom options where possible. +func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOptions { + cso.CustomPipeline = cp + return cso +} + // MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a // last-one-wins fashion. 
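The Custom and CustomPipeline maps added above forward options the driver does not model to the aggregate command and to the $changeStream stage respectively. A sketch only; the forwarded option names below are illustrative, bypass client-side validation, and may be rejected by servers that do not recognize them:

package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("test").Collection("orders")
	csOpts := options.ChangeStream().
		// Forwarded to the top level of the initial aggregate command.
		SetCustom(bson.M{"comment": "audit change stream"}).
		// Injected into the $changeStream stage; option name is illustrative.
		SetCustomPipeline(bson.M{"showExpandedEvents": true})

	stream, err := coll.Watch(ctx, mongo.Pipeline{}, csOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close(ctx)
}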
func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions { @@ -126,6 +154,12 @@ func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions if cso.StartAfter != nil { csOpts.StartAfter = cso.StartAfter } + if cso.Custom != nil { + csOpts.Custom = cso.Custom + } + if cso.CustomPipeline != nil { + csOpts.CustomPipeline = cso.CustomPipeline + } } return csOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go index ea770413a..b8f6e8710 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go @@ -6,10 +6,16 @@ package options +import ( + "crypto/tls" + "fmt" +) + // ClientEncryptionOptions represents all possible options used to configure a ClientEncryption instance. type ClientEncryptionOptions struct { KeyVaultNamespace string KmsProviders map[string]map[string]interface{} + TLSConfig map[string]*tls.Config } // ClientEncryption creates a new ClientEncryptionOptions instance. @@ -29,6 +35,86 @@ func (c *ClientEncryptionOptions) SetKmsProviders(providers map[string]map[strin return c } +// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. +// +// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. +func (c *ClientEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *ClientEncryptionOptions { + tlsConfigs := make(map[string]*tls.Config) + for provider, config := range tlsOpts { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + if config.MinVersion == 0 { + config.MinVersion = tls.VersionTLS12 + } + tlsConfigs[provider] = config + } + c.TLSConfig = tlsConfigs + return c +} + +// BuildTLSConfig specifies tls.Config options for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. The input map should contain a mapping from each KMS provider to a document containing the necessary +// options, as follows: +// +// { +// "kmip": { +// "tlsCertificateKeyFile": "foo.pem", +// "tlsCAFile": "fooCA.pem" +// } +// } +// +// Currently, the following TLS options are supported: +// +// 1. "tlsCertificateKeyFile" (or "sslClientCertificateKeyFile"): The "tlsCertificateKeyFile" option specifies a path to +// the client certificate and private key, which must be concatenated into one file. +// +// 2. "tlsCertificateKeyFilePassword" (or "sslClientCertificateKeyPassword"): Specify the password to decrypt the client +// private key file (e.g. "tlsCertificateKeyFilePassword=password"). +// +// 3. "tlsCaFile" (or "sslCertificateAuthorityFile"): Specify the path to a single or bundle of certificate authorities +// to be considered trusted when making a TLS connection (e.g. "tlsCaFile=/path/to/caFile"). +// +// This should only be used to set custom TLS options. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. 
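BuildTLSConfig and ClientEncryptionOptions.SetTLSConfig can be combined to give a KMS provider such as KMIP its own TLS configuration. A minimal sketch; the endpoint and certificate paths are placeholders:

package main

import (
	"context"
	"crypto/tls"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	keyVaultClient, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer keyVaultClient.Disconnect(ctx)

	// Build a *tls.Config (MinVersion TLS 1.2) from file-based options.
	kmipTLS, err := options.BuildTLSConfig(map[string]interface{}{
		"tlsCertificateKeyFile": "/path/to/client.pem", // placeholder
		"tlsCAFile":             "/path/to/ca.pem",     // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	ceOpts := options.ClientEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetKmsProviders(map[string]map[string]interface{}{
			"kmip": {"endpoint": "localhost:5698"}, // placeholder endpoint
		}).
		SetTLSConfig(map[string]*tls.Config{"kmip": kmipTLS})

	ce, err := mongo.NewClientEncryption(keyVaultClient, ceOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer ce.Close(ctx)
}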
+func BuildTLSConfig(tlsOpts map[string]interface{}) (*tls.Config, error) { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + cfg := &tls.Config{MinVersion: tls.VersionTLS12} + + for name := range tlsOpts { + var err error + switch name { + case "tlsCertificateKeyFile", "sslClientCertificateKeyFile": + clientCertPath, ok := tlsOpts[name].(string) + if !ok { + return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name]) + } + // apply custom key file password if found, otherwise use empty string + if keyPwd, found := tlsOpts["tlsCertificateKeyFilePassword"].(string); found { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd) + } else if keyPwd, found := tlsOpts["sslClientCertificateKeyPassword"].(string); found { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd) + } else { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, "") + } + case "tlsCertificateKeyFilePassword", "sslClientCertificateKeyPassword": + continue + case "tlsCAFile", "sslCertificateAuthorityFile": + caPath, ok := tlsOpts[name].(string) + if !ok { + return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name]) + } + err = addCACertFromFile(cfg, caPath) + default: + return nil, fmt.Errorf("unrecognized TLS option %v", name) + } + + if err != nil { + return nil, err + } + } + + return cfg, nil +} + // MergeClientEncryptionOptions combines the argued ClientEncryptionOptions in a last-one wins fashion. func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncryptionOptions { ceo := ClientEncryption() @@ -43,6 +129,9 @@ func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncry if opt.KmsProviders != nil { ceo.KmsProviders = opt.KmsProviders } + if opt.TLSConfig != nil { + ceo.TLSConfig = opt.TLSConfig + } } return ceo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index d66114fad..115cc642d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -109,6 +109,7 @@ type ClientOptions struct { MaxConnIdleTime *time.Duration MaxPoolSize *uint64 MinPoolSize *uint64 + MaxConnecting *uint64 PoolMonitor *event.PoolMonitor Monitor *event.CommandMonitor ServerMonitor *event.ServerMonitor @@ -121,6 +122,8 @@ type ClientOptions struct { ServerAPIOptions *ServerAPIOptions ServerSelectionTimeout *time.Duration SocketTimeout *time.Duration + SRVMaxHosts *int + SRVServiceName *string TLSConfig *tls.Config WriteConcern *writeconcern.WriteConcern ZlibLevel *int @@ -136,6 +139,13 @@ type ClientOptions struct { // release. AuthenticateToAnything *bool + // Crypt specifies a custom driver.Crypt to be used to encrypt and decrypt documents. The default is no + // encryption. + // + // Deprecated: This option is for internal use only and should not be set (see GODRIVER-2149). It may be + // changed or removed in any release. + Crypt driver.Crypt + // Deployment specifies a custom deployment to use for the new Client. // // Deprecated: This option is for internal use only and should not be set. 
It may be changed or removed in any @@ -180,12 +190,25 @@ func (c *ClientOptions) validateAndSetError() { if c.LoadBalanced != nil && *c.LoadBalanced { if len(c.Hosts) > 1 { c.err = internal.ErrLoadBalancedWithMultipleHosts + return } if c.ReplicaSet != nil { c.err = internal.ErrLoadBalancedWithReplicaSet + return } if c.Direct != nil { c.err = internal.ErrLoadBalancedWithDirectConnection + return + } + } + + // Validation for srvMaxHosts. + if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { + if c.ReplicaSet != nil { + c.err = internal.ErrSRVMaxHostsWithReplicaSet + } + if c.LoadBalanced != nil && *c.LoadBalanced { + c.err = internal.ErrSRVMaxHostsWithLoadBalanced } } } @@ -290,6 +313,10 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.MinPoolSize = &cs.MinPoolSize } + if cs.MaxConnectingSet { + c.MaxConnecting = &cs.MaxConnecting + } + if cs.ReadConcernLevel != "" { c.ReadConcern = readconcern.New(readconcern.Level(cs.ReadConcernLevel)) } @@ -338,6 +365,14 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.SocketTimeout = &cs.SocketTimeout } + if cs.SRVMaxHosts != 0 { + c.SRVMaxHosts = &cs.SRVMaxHosts + } + + if cs.SRVServiceName != "" { + c.SRVServiceName = &cs.SRVServiceName + } + if cs.SSL { tlsConfig := new(tls.Config) @@ -539,7 +574,7 @@ func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions { // SetMaxPoolSize specifies that maximum number of connections allowed in the driver's connection pool to each server. // Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option -// (e.g. "maxPoolSize=100"). The default is 100. If this is 0, it will be set to math.MaxInt64. +// (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100. func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions { c.MaxPoolSize = &u return c @@ -553,6 +588,14 @@ func (c *ClientOptions) SetMinPoolSize(u uint64) *ClientOptions { return c } +// SetMaxConnecting specifies the maximum number of connections a connection pool may establish simultaneously. This can +// also be set through the "maxConnecting" URI option (e.g. "maxConnecting=2"). If this is 0, the default is used. The +// default is 2. Values greater than 100 are not recommended. +func (c *ClientOptions) SetMaxConnecting(u uint64) *ClientOptions { + c.MaxConnecting = &u + return c +} + // SetPoolMonitor specifies a PoolMonitor to receive connection pool events. See the event.PoolMonitor documentation // for more information about the structure of the monitor and events that can be received. func (c *ClientOptions) SetPoolMonitor(m *event.PoolMonitor) *ClientOptions { @@ -760,6 +803,22 @@ func (c *ClientOptions) SetServerAPIOptions(opts *ServerAPIOptions) *ClientOptio return c } +// SetSRVMaxHosts specifies the maximum number of SRV results to randomly select during polling. To limit the number +// of hosts selected in SRV discovery, this function must be called before ApplyURI. This can also be set through +// the "srvMaxHosts" URI option. +func (c *ClientOptions) SetSRVMaxHosts(srvMaxHosts int) *ClientOptions { + c.SRVMaxHosts = &srvMaxHosts + return c +} + +// SetSRVServiceName specifies a custom SRV service name to use in SRV polling. To use a custom SRV service name +// in SRV discovery, this function must be called before ApplyURI. This can also be set through the "srvServiceName" +// URI option. 
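Since SetSRVMaxHosts and SetSRVServiceName only influence SRV discovery when applied before ApplyURI, and maxConnecting is now a first-class pool option, a combined client configuration might look like the following sketch (the SRV hostname is a placeholder):

package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	opts := options.Client().
		SetSRVMaxHosts(2).               // keep at most 2 randomly-selected SRV results
		SetSRVServiceName("customname"). // poll _customname._tcp instead of _mongodb._tcp
		SetMaxConnecting(2).             // at most 2 connections being established per pool
		ApplyURI("mongodb+srv://cluster0.example.com/")

	client, err := mongo.Connect(ctx, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)
}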
+func (c *ClientOptions) SetSRVServiceName(srvName string) *ClientOptions { + c.SRVServiceName = &srvName + return c +} + // MergeClientOptions combines the given *ClientOptions into a single *ClientOptions in a last one wins fashion. // The specified options are merged with the existing options on the client, with the specified options taking // precedence. @@ -789,6 +848,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.ConnectTimeout != nil { c.ConnectTimeout = opt.ConnectTimeout } + if opt.Crypt != nil { + c.Crypt = opt.Crypt + } if opt.HeartbeatInterval != nil { c.HeartbeatInterval = opt.HeartbeatInterval } @@ -810,6 +872,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.MinPoolSize != nil { c.MinPoolSize = opt.MinPoolSize } + if opt.MaxConnecting != nil { + c.MaxConnecting = opt.MaxConnecting + } if opt.PoolMonitor != nil { c.PoolMonitor = opt.PoolMonitor } @@ -849,6 +914,12 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.SocketTimeout != nil { c.SocketTimeout = opt.SocketTimeout } + if opt.SRVMaxHosts != nil { + c.SRVMaxHosts = opt.SRVMaxHosts + } + if opt.SRVServiceName != nil { + c.SRVServiceName = opt.SRVServiceName + } if opt.TLSConfig != nil { c.TLSConfig = opt.TLSConfig } @@ -930,7 +1001,8 @@ func addClientCertFromConcatenatedFile(cfg *tls.Config, certKeyFile, keyPassword // containing file and returns the certificate's subject name. func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (string, error) { var currentBlock *pem.Block - var certBlock, certDecodedBlock, keyBlock []byte + var certDecodedBlock []byte + var certBlocks, keyBlocks [][]byte remaining := data start := 0 @@ -941,7 +1013,8 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str } if currentBlock.Type == "CERTIFICATE" { - certBlock = data[start : len(data)-len(remaining)] + certBlock := data[start : len(data)-len(remaining)] + certBlocks = append(certBlocks, certBlock) certDecodedBlock = currentBlock.Bytes start += len(certBlock) } else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") { @@ -966,29 +1039,31 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str if err != nil { return "", err } - keyBytes, err = x509MarshalPKCS8PrivateKey(decrypted) + keyBytes, err = x509.MarshalPKCS8PrivateKey(decrypted) if err != nil { return "", err } } var encoded bytes.Buffer pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: keyBytes}) - keyBlock = encoded.Bytes() + keyBlock := encoded.Bytes() + keyBlocks = append(keyBlocks, keyBlock) start = len(data) - len(remaining) } else { - keyBlock = data[start : len(data)-len(remaining)] + keyBlock := data[start : len(data)-len(remaining)] + keyBlocks = append(keyBlocks, keyBlock) start += len(keyBlock) } } } - if len(certBlock) == 0 { + if len(certBlocks) == 0 { return "", fmt.Errorf("failed to find CERTIFICATE") } - if len(keyBlock) == 0 { + if len(keyBlocks) == 0 { return "", fmt.Errorf("failed to find PRIVATE KEY") } - cert, err := tls.X509KeyPair(certBlock, keyBlock) + cert, err := tls.X509KeyPair(bytes.Join(certBlocks, []byte("\n")), bytes.Join(keyBlocks, []byte("\n"))) if err != nil { return "", err } @@ -1002,7 +1077,7 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str return "", err } - return x509CertSubject(crt), nil + return crt.Subject.String(), nil } func stringSliceContains(source []string, target string) bool { diff --git 
a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go deleted file mode 100644 index 1943d9c50..000000000 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.10 - -package options - -import "crypto/x509" - -func x509CertSubject(cert *x509.Certificate) string { - return cert.Subject.String() -} - -func x509MarshalPKCS8PrivateKey(pkcs8 interface{}) ([]byte, error) { - return x509.MarshalPKCS8PrivateKey(pkcs8) -} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go deleted file mode 100644 index e10992296..000000000 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !go1.10 - -package options - -import ( - "crypto/x509" - "fmt" -) - -// We don't support Go versions less than 1.10, but Evergreen needs to be able to compile the driver -// using version 1.9 and cert.Subject -func x509CertSubject(cert *x509.Certificate) string { - return "" -} - -// We don't support Go versions less than 1.10, but Evergreen needs to be able to compile the driver -// using version 1.9 and x509.MarshalPKCS8PrivateKey() -func x509MarshalPKCS8PrivateKey(pkcs8 interface{}) ([]byte, error) { - return nil, fmt.Errorf("PKCS8-encrypted client private keys are only supported with go1.10+") -} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go index 3fe63c323..130c8e75c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go @@ -110,10 +110,20 @@ type CreateCollectionOptions struct { // collection. Validator interface{} - // Value indicating after how many seconds old time-series data should be deleted. + // Value indicating after how many seconds old time-series data should be deleted. See + // https://docs.mongodb.com/manual/reference/command/create/ for supported options, and + // https://docs.mongodb.com/manual/core/timeseries-collections/ for more information on time-series + // collections. + // + // This option is only valid for MongoDB versions >= 5.0 ExpireAfterSeconds *int64 - // Options for specifying a time-series collection. + // Options for specifying a time-series collection. See + // https://docs.mongodb.com/manual/reference/command/create/ for supported options, and + // https://docs.mongodb.com/manual/core/timeseries-collections/ for more information on time-series + // collections. + // + // This option is only valid for MongoDB versions >= 5.0 TimeSeriesOptions *TimeSeriesOptions } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go index fcea40a5c..0473d81ff 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go @@ -20,6 +20,12 @@ type DeleteOptions struct { // operation. The driver will return an error if the hint parameter is a multi-key map. The default value is nil, // which means that no hint will be sent. Hint interface{} + + // Specifies parameters for the delete expression. This option is only valid for MongoDB versions >= 5.0. 
Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // Delete creates a new DeleteOptions instance. @@ -39,6 +45,12 @@ func (do *DeleteOptions) SetHint(hint interface{}) *DeleteOptions { return do } +// SetLet sets the value for the Let field. +func (do *DeleteOptions) SetLet(let interface{}) *DeleteOptions { + do.Let = let + return do +} + // MergeDeleteOptions combines the given DeleteOptions instances into a single DeleteOptions in a last-one-wins fashion. func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions { dOpts := Delete() @@ -52,6 +64,9 @@ func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions { if do.Hint != nil { dOpts.Hint = do.Hint } + if do.Let != nil { + dOpts.Let = do.Let + } } return dOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go index ad9409c97..0dd09f514 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go @@ -99,6 +99,12 @@ type FindOptions struct { // A document specifying the order in which documents should be returned. The driver will return an error if the // sort parameter is a multi-key map. Sort interface{} + + // Specifies parameters for the find expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // Find creates a new FindOptions instance. @@ -148,6 +154,12 @@ func (f *FindOptions) SetHint(hint interface{}) *FindOptions { return f } +// SetLet sets the value for the Let field. +func (f *FindOptions) SetLet(let interface{}) *FindOptions { + f.Let = let + return f +} + // SetLimit sets the value for the Limit field. func (f *FindOptions) SetLimit(i int64) *FindOptions { f.Limit = &i @@ -258,6 +270,9 @@ func MergeFindOptions(opts ...*FindOptions) *FindOptions { if opt.Hint != nil { fo.Hint = opt.Hint } + if opt.Let != nil { + fo.Let = opt.Let + } if opt.Limit != nil { fo.Limit = opt.Limit } @@ -624,6 +639,12 @@ type FindOneAndReplaceOptions struct { // will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint // will be sent. Hint interface{} + + // Specifies parameters for the find one and replace expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // FindOneAndReplace creates a new FindOneAndReplaceOptions instance. @@ -679,6 +700,12 @@ func (f *FindOneAndReplaceOptions) SetHint(hint interface{}) *FindOneAndReplaceO return f } +// SetLet sets the value for the Let field. 
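FindOptions.Let, added above, follows the same pattern as the other 5.0-only let parameters (including the DeleteOptions variant earlier in this hunk): variables defined in the let document can be referenced through $expr in the filter. A small sketch with invented collection and variable names:

package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("test").Collection("users")
	filter := bson.D{{Key: "$expr", Value: bson.D{
		{Key: "$eq", Value: bson.A{"$status", "$$targetStatus"}},
	}}}
	findOpts := options.Find().SetLet(bson.D{{Key: "targetStatus", Value: "active"}})

	cur, err := coll.Find(ctx, filter, findOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer cur.Close(ctx)
}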
+func (f *FindOneAndReplaceOptions) SetLet(let interface{}) *FindOneAndReplaceOptions { + f.Let = let + return f +} + // MergeFindOneAndReplaceOptions combines the given FindOneAndReplaceOptions instances into a single // FindOneAndReplaceOptions in a last-one-wins fashion. func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions { @@ -711,6 +738,9 @@ func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAn if opt.Hint != nil { fo.Hint = opt.Hint } + if opt.Let != nil { + fo.Let = opt.Let + } } return fo @@ -762,6 +792,12 @@ type FindOneAndUpdateOptions struct { // will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint // will be sent. Hint interface{} + + // Specifies parameters for the find one and update expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // FindOneAndUpdate creates a new FindOneAndUpdateOptions instance. @@ -823,6 +859,12 @@ func (f *FindOneAndUpdateOptions) SetHint(hint interface{}) *FindOneAndUpdateOpt return f } +// SetLet sets the value for the Let field. +func (f *FindOneAndUpdateOptions) SetLet(let interface{}) *FindOneAndUpdateOptions { + f.Let = let + return f +} + // MergeFindOneAndUpdateOptions combines the given FindOneAndUpdateOptions instances into a single // FindOneAndUpdateOptions in a last-one-wins fashion. func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions { @@ -858,6 +900,9 @@ func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndU if opt.Hint != nil { fo.Hint = opt.Hint } + if opt.Let != nil { + fo.Let = opt.Let + } } return fo @@ -890,6 +935,12 @@ type FindOneAndDeleteOptions struct { // will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint // will be sent. Hint interface{} + + // Specifies parameters for the find one and delete expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // FindOneAndDelete creates a new FindOneAndDeleteOptions instance. @@ -927,6 +978,12 @@ func (f *FindOneAndDeleteOptions) SetHint(hint interface{}) *FindOneAndDeleteOpt return f } +// SetLet sets the value for the Let field. +func (f *FindOneAndDeleteOptions) SetLet(let interface{}) *FindOneAndDeleteOptions { + f.Let = let + return f +} + // MergeFindOneAndDeleteOptions combines the given FindOneAndDeleteOptions instances into a single // FindOneAndDeleteOptions in a last-one-wins fashion. 
func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions { @@ -950,6 +1007,9 @@ func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndD if opt.Hint != nil { fo.Hint = opt.Hint } + if opt.Let != nil { + fo.Let = opt.Let + } } return fo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go index 076eedd76..ed71fb418 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go @@ -389,6 +389,9 @@ func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions { i := Index() for _, opt := range opts { + if opt == nil { + continue + } if opt.Background != nil { i.Background = opt.Background } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go index 4c2ce3e6d..6f4b1cca6 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go @@ -13,6 +13,10 @@ type ListCollectionsOptions struct { // The maximum number of documents to be included in each batch returned by the server. BatchSize *int32 + + // If true, and NameOnly is true, limits the documents returned to only contain collections the user is authorized to use. The default value + // is false. This option is only valid for MongoDB server versions >= 4.0. Server versions < 4.0 ignore this option. + AuthorizedCollections *bool } // ListCollections creates a new ListCollectionsOptions instance. @@ -32,6 +36,13 @@ func (lc *ListCollectionsOptions) SetBatchSize(size int32) *ListCollectionsOptio return lc } +// SetAuthorizedCollections sets the value for the AuthorizedCollections field. This option is only valid for MongoDB server versions >= 4.0. Server +// versions < 4.0 ignore this option. +func (lc *ListCollectionsOptions) SetAuthorizedCollections(b bool) *ListCollectionsOptions { + lc.AuthorizedCollections = &b + return lc +} + // MergeListCollectionsOptions combines the given ListCollectionsOptions instances into a single *ListCollectionsOptions // in a last-one-wins fashion. 
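// Illustrative sketch, not part of the vendored patch: the hunks above add a Let
// field and SetLet setter to the CRUD option types (MongoDB 5.0+ only). Assuming a
// hypothetical collection with a "status" field, a caller could pass a named
// parameter and reference it as $$targetStatus inside an aggregate expression:
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func deleteInactive(ctx context.Context, coll *mongo.Collection) (int64, error) {
	let := bson.D{{Key: "targetStatus", Value: "inactive"}}
	// Let parameters are only visible in aggregate expression contexts such as $expr.
	filter := bson.D{{Key: "$expr", Value: bson.D{
		{Key: "$eq", Value: bson.A{"$status", "$$targetStatus"}},
	}}}
	res, err := coll.DeleteMany(ctx, filter, options.Delete().SetLet(let))
	if err != nil {
		return 0, err
	}
	return res.DeletedCount, nil
}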
func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions { @@ -46,6 +57,9 @@ func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectio if opt.BatchSize != nil { lc.BatchSize = opt.BatchSize } + if opt.AuthorizedCollections != nil { + lc.AuthorizedCollections = opt.AuthorizedCollections + } } return lc diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go index 42fe17042..f19d89c6d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go @@ -40,7 +40,7 @@ func (ld *ListDatabasesOptions) SetAuthorizedDatabases(b bool) *ListDatabasesOpt func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions { ld := ListDatabases() for _, opt := range opts { - if opts == nil { + if opt == nil { continue } if opt.NameOnly != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index f6df24379..179b73563 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -22,7 +22,7 @@ type Collation struct { Locale string `bson:",omitempty"` // The locale CaseLevel bool `bson:",omitempty"` // The case level CaseFirst string `bson:",omitempty"` // The case ordering - Strength int `bson:",omitempty"` // The number of comparision levels to use + Strength int `bson:",omitempty"` // The number of comparison levels to use NumericOrdering bool `bson:",omitempty"` // Whether to order numbers based on numerical order and not collation order Alternate string `bson:",omitempty"` // Whether spaces and punctuation are considered base characters MaxVariable string `bson:",omitempty"` // Which characters are affected by alternate: "shifted" @@ -157,5 +157,3 @@ type MarshalError struct { func (me MarshalError) Error() string { return fmt.Sprintf("cannot transform type %s to a bson.Raw", reflect.TypeOf(me.Value)) } - -var defaultRegistry = bson.DefaultRegistry diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go index 543c81cea..9cb9ab872 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go @@ -30,6 +30,12 @@ type ReplaceOptions struct { // If true, a new document will be inserted if the filter does not match any documents in the collection. The // default value is false. Upsert *bool + + // Specifies parameters for the aggregate expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // Replace creates a new ReplaceOptions instance. @@ -61,6 +67,12 @@ func (ro *ReplaceOptions) SetUpsert(b bool) *ReplaceOptions { return ro } +// SetLet sets the value for the Let field. 
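// Illustrative sketch, not part of the vendored patch: the new AuthorizedCollections
// option only takes effect together with NameOnly and is honored by MongoDB 4.0+
// servers (older servers ignore it). A possible caller, assuming an existing
// *mongo.Database handle:
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func authorizedCollectionNames(ctx context.Context, db *mongo.Database) ([]string, error) {
	opts := options.ListCollections().
		SetNameOnly(true).
		SetAuthorizedCollections(true)
	return db.ListCollectionNames(ctx, bson.D{}, opts)
}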
+func (ro *ReplaceOptions) SetLet(l interface{}) *ReplaceOptions { + ro.Let = l + return ro +} + // MergeReplaceOptions combines the given ReplaceOptions instances into a single ReplaceOptions in a last-one-wins // fashion. func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions { @@ -81,6 +93,9 @@ func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions { if ro.Upsert != nil { rOpts.Upsert = ro.Upsert } + if ro.Let != nil { + rOpts.Let = ro.Let + } } return rOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go index 4d1e7e47e..fd0631de4 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go @@ -35,6 +35,12 @@ type UpdateOptions struct { // If true, a new document will be inserted if the filter does not match any documents in the collection. The // default value is false. Upsert *bool + + // Specifies parameters for the update expression. This option is only valid for MongoDB versions >= 5.0. Older + // servers will report an error for using this option. This must be a document mapping parameter names to values. + // Values must be constant or closed expressions that do not reference document fields. Parameters can then be + // accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // Update creates a new UpdateOptions instance. @@ -72,6 +78,12 @@ func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions { return uo } +// SetLet sets the value for the Let field. +func (uo *UpdateOptions) SetLet(l interface{}) *UpdateOptions { + uo.Let = l + return uo +} + // MergeUpdateOptions combines the given UpdateOptions instances into a single UpdateOptions in a last-one-wins fashion. func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions { uOpts := Update() @@ -94,6 +106,9 @@ func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions { if uo.Upsert != nil { uOpts.Upsert = uo.Upsert } + if uo.Let != nil { + uOpts.Let = uo.Let + } } return uOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go index f742de0ce..68286d10a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go @@ -61,7 +61,7 @@ func WithTagSets(tagSets ...tag.Set) Option { // WithHedgeEnabled specifies whether or not hedged reads should be enabled in the server. This feature requires MongoDB // server version 4.4 or higher. For more information about hedged reads, see -// https://docs.mongodb.com/master/core/sharded-cluster-query-router/#mongos-hedged-reads. If not specified, the default +// https://docs.mongodb.com/manual/core/sharded-cluster-query-router/#mongos-hedged-reads. If not specified, the default // is to not send a value to the server, which will result in the server defaults being used. 
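// Illustrative sketch, not part of the vendored patch: WithHedgeEnabled (documented
// just above) opts a read preference into server-side hedged reads on MongoDB 4.4+.
// The connection string below is a placeholder:
package example

import (
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func hedgedClientOptions() *options.ClientOptions {
	rp := readpref.SecondaryPreferred(readpref.WithHedgeEnabled(true))
	return options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetReadPreference(rp)
}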
func WithHedgeEnabled(hedgeEnabled bool) Option { return func(rp *ReadPref) error { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go index b651b69e9..a3ac0edf8 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go @@ -65,6 +65,9 @@ func New(mode Mode, opts ...Option) (*ReadPref, error) { } for _, opt := range opts { + if opt == nil { + continue + } err := opt(rp) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/results.go b/vendor/go.mongodb.org/mongo-driver/mongo/results.go index 52e2dedd4..6951bea65 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/results.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/results.go @@ -158,15 +158,30 @@ type IndexSpecification struct { // The index version. Version int32 + + // The length of time, in seconds, for documents to remain in the collection. The default value is 0, which means + // that documents will remain in the collection until they're explicitly deleted or the collection is dropped. + ExpireAfterSeconds *int32 + + // If true, the index will only reference documents that contain the fields specified in the index. The default is + // false. + Sparse *bool + + // If true, the collection will not accept insertion or update of documents where the index key value matches an + // existing value in the index. The default is false. + Unique *bool } var _ bson.Unmarshaler = (*IndexSpecification)(nil) type unmarshalIndexSpecification struct { - Name string `bson:"name"` - Namespace string `bson:"ns"` - KeysDocument bson.Raw `bson:"key"` - Version int32 `bson:"v"` + Name string `bson:"name"` + Namespace string `bson:"ns"` + KeysDocument bson.Raw `bson:"key"` + Version int32 `bson:"v"` + ExpireAfterSeconds *int32 `bson:"expireAfterSeconds"` + Sparse *bool `bson:"sparse"` + Unique *bool `bson:"unique"` } // UnmarshalBSON implements the bson.Unmarshaler interface. @@ -180,6 +195,9 @@ func (i *IndexSpecification) UnmarshalBSON(data []byte) error { i.Namespace = temp.Namespace i.KeysDocument = temp.KeysDocument i.Version = temp.Version + i.ExpireAfterSeconds = temp.ExpireAfterSeconds + i.Sparse = temp.Sparse + i.Unique = temp.Unique return nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/session.go b/vendor/go.mongodb.org/mongo-driver/mongo/session.go index c1a2c8ea3..93bc5cb46 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/session.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/session.go @@ -100,13 +100,16 @@ func SessionFromContext(ctx context.Context) Session { // resources are properly cleaned up, context deadlines and cancellations will not be respected during this call. For a // usage example, see the Client.StartSession method documentation. // -// ClusterTime, OperationTime, Client, and ID return the session's current operation time, the session's current cluster +// ClusterTime, OperationTime, Client, and ID return the session's current cluster time, the session's current operation // time, the Client associated with the session, and the ID document associated with the session, respectively. The ID // document for a session is in the form {"id": }. // // EndSession method should abort any existing transactions and close the session. // -// AdvanceClusterTime and AdvanceOperationTime are for internal use only and must not be called. +// AdvanceClusterTime advances the cluster time for a session. 
This method will return an error if the session has ended. +// +// AdvanceOperationTime advances the operation time for a session. This method will return an error if the session has +// ended. type Session interface { // Functions to modify session state. StartTransaction(...*options.TransactionOptions) error diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go index c693dfae2..476025021 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go @@ -28,6 +28,31 @@ type SingleResult struct { reg *bsoncodec.Registry } +// NewSingleResultFromDocument creates a SingleResult with the provided error, registry, and an underlying Cursor pre-loaded with +// the provided document, error and registry. If no registry is provided, bson.DefaultRegistry will be used. If an error distinct +// from the one provided occurs during creation of the SingleResult, that error will be stored on the returned SingleResult. +// +// The document parameter must be a non-nil document. +func NewSingleResultFromDocument(document interface{}, err error, registry *bsoncodec.Registry) *SingleResult { + if document == nil { + return &SingleResult{err: ErrNilDocument} + } + if registry == nil { + registry = bson.DefaultRegistry + } + + cur, createErr := NewCursorFromDocuments([]interface{}{document}, err, registry) + if createErr != nil { + return &SingleResult{err: createErr} + } + + return &SingleResult{ + cur: cur, + err: err, + reg: registry, + } +} + // Decode will unmarshal the document represented by this SingleResult into v. If there was an error from the operation // that created this SingleResult, that error will be returned. If the operation returned no documents, Decode will // return ErrNoDocuments. @@ -71,6 +96,7 @@ func (sr *SingleResult) setRdrContents() error { return nil case sr.cur != nil: defer sr.cur.Close(context.TODO()) + if !sr.cur.Next(context.TODO()) { if err := sr.cur.Err(); err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index b2831bf18..b9145c9ee 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -105,7 +105,7 @@ func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) { elems = bsoncore.AppendInt32Element(elems, "w", int32(t)) case string: - elems = bsoncore.AppendStringElement(elems, "w", string(t)) + elems = bsoncore.AppendStringElement(elems, "w", t) } } diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 5bb8e71c3..3adbbb664 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -7,4 +7,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. 
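// Illustrative sketch, not part of the vendored patch: NewSingleResultFromDocument
// (added above) backs a SingleResult with a fixed document, which is convenient for
// stubbing FindOne-style calls in tests. The document content here is arbitrary; a
// nil registry falls back to bson.DefaultRegistry:
package example

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

func decodeStubbedResult() (bson.M, error) {
	doc := bson.D{{Key: "_id", Value: int32(1)}, {Key: "name", Value: "example"}}
	sr := mongo.NewSingleResultFromDocument(doc, nil, nil)
	var out bson.M
	if err := sr.Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}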
-var Driver = "v1.7.2" +var Driver = "v1.9.1" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go index b0d45212d..52162f8aa 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go @@ -45,7 +45,7 @@ func (db *DocumentBuilder) AppendInt32(key string, i32 int32) *DocumentBuilder { return db } -// AppendDocument will append a bson embeded document element using key +// AppendDocument will append a bson embedded document element using key // and doc to DocumentBuilder.doc func (db *DocumentBuilder) AppendDocument(key string, doc []byte) *DocumentBuilder { db.doc = AppendDocumentElement(db.doc, key, doc) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index b8db838a5..5b0c3a042 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -15,7 +15,7 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes availble, it will return false, if there are enough bytes, it +// number of bytes available, it will return false, if there are enough bytes, it // will return those bytes and true. It is the consumers responsibility to // validate those bytes. // @@ -69,7 +69,7 @@ func AppendHeader(dst []byte, t bsontype.Type, key string) []byte { // was read. // ReadType will return the first byte of the provided []byte as a type. If -// there is no availble byte, false is returned. +// there is no available byte, false is returned. func ReadType(src []byte) (bsontype.Type, []byte, bool) { if len(src) < 1 { return 0, src, false @@ -231,7 +231,7 @@ func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) { // AppendDocument will append doc to dst and return the extended buffer. func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) } -// AppendDocumentElement will append a BSON embeded document element using key +// AppendDocumentElement will append a BSON embedded document element using key // and doc to dst and return the extended buffer. func AppendDocumentElement(dst []byte, key string, doc []byte) []byte { return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index ed95233d9..54aa617cf 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -602,7 +602,7 @@ func (v Value) Time() time.Time { if !ok { panic(NewInsufficientBytesError(v.Data, v.Data)) } - return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000) + return time.Unix(dt/1000, dt%1000*1000000) } // TimeOK is the same as Time, except it returns a boolean instead of @@ -615,7 +615,7 @@ func (v Value) TimeOK() (time.Time, bool) { if !ok { return time.Time{}, false } - return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000), true + return time.Unix(dt/1000, dt%1000*1000000), true } // Regex returns the BSON regex value the Value represents. 
It panics if the value is a BSON diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go index 6d2c6f31a..2d53bc18b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go @@ -264,7 +264,7 @@ func (d Doc) Equal(id IDoc) bool { } } case MDoc: - unique := make(map[string]struct{}, 0) + unique := make(map[string]struct{}) for _, elem := range d { unique[elem.Key] = struct{}{} val, ok := tt[elem.Key] diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go index 66054b2c5..00d1ba377 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go @@ -12,8 +12,6 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -const validateMaxDepthDefault = 2048 - // ElementTypeError specifies that a method to obtain a BSON value an incorrect type was called on a bson.Value. // // TODO: rename this ValueTypeError. diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go index 380c8629c..7877f2240 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go @@ -21,7 +21,7 @@ type MDoc map[string]Val // ReadMDoc will create a Doc using the provided slice of bytes. If the // slice of bytes is not a valid BSON document, this method will return an error. func ReadMDoc(b []byte) (MDoc, error) { - doc := make(MDoc, 0) + doc := make(MDoc) err := doc.UnmarshalBSON(b) if err != nil { return nil, err @@ -188,7 +188,7 @@ func (d MDoc) Equal(id IDoc) bool { } } case Doc: - unique := make(map[string]struct{}, 0) + unique := make(map[string]struct{}) for _, elem := range tt { unique[elem.Key] = struct{}{} val, ok := d[elem.Key] diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go index 7328a8671..01bd18267 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go @@ -19,7 +19,6 @@ import ( var primitiveCodecs PrimitiveCodecs var tDocument = reflect.TypeOf((Doc)(nil)) -var tMDoc = reflect.TypeOf((MDoc)(nil)) var tArray = reflect.TypeOf((Arr)(nil)) var tValue = reflect.TypeOf(Val{}) var tElementSlice = reflect.TypeOf(([]Elem)(nil)) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go index 9df93ad47..7e68e55c1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go @@ -20,7 +20,6 @@ import ( var ( tPrimitiveD = reflect.TypeOf(primitive.D{}) - tPrimitiveA = reflect.TypeOf(primitive.A{}) tPrimitiveCWS = reflect.TypeOf(primitive.CodeWithScope{}) defaultValueEncoders = bsoncodec.DefaultValueEncoders{} defaultValueDecoders = bsoncodec.DefaultValueDecoders{} @@ -224,13 +223,13 @@ func (r *reflectionFreeDCodec) decodeValue(dc bsoncodec.DecodeContext, vr bsonrw func (r *reflectionFreeDCodec) encodeDocumentValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, v interface{}) error { switch val := v.(type) { case int: - return r.encodeInt(ec, vw, val) + return r.encodeInt(vw, val) case int8: return vw.WriteInt32(int32(val)) case int16: return 
vw.WriteInt32(int32(val)) case int32: - return vw.WriteInt32(int32(val)) + return vw.WriteInt32(val) case int64: return r.encodeInt64(ec, vw, val) case uint: @@ -293,69 +292,69 @@ func (r *reflectionFreeDCodec) encodeDocumentValue(ec bsoncodec.EncodeContext, v case []primitive.D: return r.encodeSliceD(ec, vw, val) case []int: - return r.encodeSliceInt(ec, vw, val) + return r.encodeSliceInt(vw, val) case []int8: - return r.encodeSliceInt8(ec, vw, val) + return r.encodeSliceInt8(vw, val) case []int16: - return r.encodeSliceInt16(ec, vw, val) + return r.encodeSliceInt16(vw, val) case []int32: - return r.encodeSliceInt32(ec, vw, val) + return r.encodeSliceInt32(vw, val) case []int64: return r.encodeSliceInt64(ec, vw, val) case []uint: return r.encodeSliceUint(ec, vw, val) case []uint16: - return r.encodeSliceUint16(ec, vw, val) + return r.encodeSliceUint16(vw, val) case []uint32: return r.encodeSliceUint32(ec, vw, val) case []uint64: return r.encodeSliceUint64(ec, vw, val) case [][]byte: - return r.encodeSliceByteSlice(ec, vw, val) + return r.encodeSliceByteSlice(vw, val) case []primitive.Binary: - return r.encodeSliceBinary(ec, vw, val) + return r.encodeSliceBinary(vw, val) case []bool: - return r.encodeSliceBoolean(ec, vw, val) + return r.encodeSliceBoolean(vw, val) case []primitive.CodeWithScope: return r.encodeSliceCWS(ec, vw, val) case []primitive.DBPointer: - return r.encodeSliceDBPointer(ec, vw, val) + return r.encodeSliceDBPointer(vw, val) case []primitive.DateTime: - return r.encodeSliceDateTime(ec, vw, val) + return r.encodeSliceDateTime(vw, val) case []time.Time: - return r.encodeSliceTimeTime(ec, vw, val) + return r.encodeSliceTimeTime(vw, val) case []primitive.Decimal128: - return r.encodeSliceDecimal128(ec, vw, val) + return r.encodeSliceDecimal128(vw, val) case []float32: - return r.encodeSliceFloat32(ec, vw, val) + return r.encodeSliceFloat32(vw, val) case []float64: - return r.encodeSliceFloat64(ec, vw, val) + return r.encodeSliceFloat64(vw, val) case []primitive.JavaScript: - return r.encodeSliceJavaScript(ec, vw, val) + return r.encodeSliceJavaScript(vw, val) case []primitive.MinKey: - return r.encodeSliceMinKey(ec, vw, val) + return r.encodeSliceMinKey(vw, val) case []primitive.MaxKey: - return r.encodeSliceMaxKey(ec, vw, val) + return r.encodeSliceMaxKey(vw, val) case []primitive.Null: - return r.encodeSliceNull(ec, vw, val) + return r.encodeSliceNull(vw, val) case []primitive.ObjectID: - return r.encodeSliceObjectID(ec, vw, val) + return r.encodeSliceObjectID(vw, val) case []primitive.Regex: - return r.encodeSliceRegex(ec, vw, val) + return r.encodeSliceRegex(vw, val) case []string: - return r.encodeSliceString(ec, vw, val) + return r.encodeSliceString(vw, val) case []primitive.Symbol: - return r.encodeSliceSymbol(ec, vw, val) + return r.encodeSliceSymbol(vw, val) case []primitive.Timestamp: - return r.encodeSliceTimestamp(ec, vw, val) + return r.encodeSliceTimestamp(vw, val) case []primitive.Undefined: - return r.encodeSliceUndefined(ec, vw, val) + return r.encodeSliceUndefined(vw, val) default: return fmt.Errorf("value of type %T not supported", v) } } -func (r *reflectionFreeDCodec) encodeInt(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val int) error { +func (r *reflectionFreeDCodec) encodeInt(vw bsonrw.ValueWriter, val int) error { if fitsIn32Bits(int64(val)) { return vw.WriteInt32(int32(val)) } @@ -366,7 +365,7 @@ func (r *reflectionFreeDCodec) encodeInt64(ec bsoncodec.EncodeContext, vw bsonrw if ec.MinSize && fitsIn32Bits(val) { return 
vw.WriteInt32(int32(val)) } - return vw.WriteInt64(int64(val)) + return vw.WriteInt64(val) } func (r *reflectionFreeDCodec) encodeUint64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val uint64) error { @@ -400,7 +399,7 @@ func (r *reflectionFreeDCodec) encodeDocument(ec bsoncodec.EncodeContext, vw bso return dw.WriteDocumentEnd() } -func (r *reflectionFreeDCodec) encodeSliceByteSlice(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr [][]byte) error { +func (r *reflectionFreeDCodec) encodeSliceByteSlice(vw bsonrw.ValueWriter, arr [][]byte) error { aw, err := vw.WriteArray() if err != nil { return err @@ -420,7 +419,7 @@ func (r *reflectionFreeDCodec) encodeSliceByteSlice(ec bsoncodec.EncodeContext, return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceBinary(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Binary) error { +func (r *reflectionFreeDCodec) encodeSliceBinary(vw bsonrw.ValueWriter, arr []primitive.Binary) error { aw, err := vw.WriteArray() if err != nil { return err @@ -440,7 +439,7 @@ func (r *reflectionFreeDCodec) encodeSliceBinary(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceBoolean(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []bool) error { +func (r *reflectionFreeDCodec) encodeSliceBoolean(vw bsonrw.ValueWriter, arr []bool) error { aw, err := vw.WriteArray() if err != nil { return err @@ -480,7 +479,7 @@ func (r *reflectionFreeDCodec) encodeSliceCWS(ec bsoncodec.EncodeContext, vw bso return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceDBPointer(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.DBPointer) error { +func (r *reflectionFreeDCodec) encodeSliceDBPointer(vw bsonrw.ValueWriter, arr []primitive.DBPointer) error { aw, err := vw.WriteArray() if err != nil { return err @@ -500,7 +499,7 @@ func (r *reflectionFreeDCodec) encodeSliceDBPointer(ec bsoncodec.EncodeContext, return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceDateTime(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.DateTime) error { +func (r *reflectionFreeDCodec) encodeSliceDateTime(vw bsonrw.ValueWriter, arr []primitive.DateTime) error { aw, err := vw.WriteArray() if err != nil { return err @@ -520,7 +519,7 @@ func (r *reflectionFreeDCodec) encodeSliceDateTime(ec bsoncodec.EncodeContext, v return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceTimeTime(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []time.Time) error { +func (r *reflectionFreeDCodec) encodeSliceTimeTime(vw bsonrw.ValueWriter, arr []time.Time) error { aw, err := vw.WriteArray() if err != nil { return err @@ -541,7 +540,7 @@ func (r *reflectionFreeDCodec) encodeSliceTimeTime(ec bsoncodec.EncodeContext, v return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceDecimal128(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Decimal128) error { +func (r *reflectionFreeDCodec) encodeSliceDecimal128(vw bsonrw.ValueWriter, arr []primitive.Decimal128) error { aw, err := vw.WriteArray() if err != nil { return err @@ -561,7 +560,7 @@ func (r *reflectionFreeDCodec) encodeSliceDecimal128(ec bsoncodec.EncodeContext, return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceFloat32(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []float32) error { +func (r *reflectionFreeDCodec) encodeSliceFloat32(vw bsonrw.ValueWriter, arr []float32) error { aw, err := vw.WriteArray() if err != nil { return err @@ 
-581,7 +580,7 @@ func (r *reflectionFreeDCodec) encodeSliceFloat32(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceFloat64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []float64) error { +func (r *reflectionFreeDCodec) encodeSliceFloat64(vw bsonrw.ValueWriter, arr []float64) error { aw, err := vw.WriteArray() if err != nil { return err @@ -601,7 +600,7 @@ func (r *reflectionFreeDCodec) encodeSliceFloat64(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceJavaScript(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.JavaScript) error { +func (r *reflectionFreeDCodec) encodeSliceJavaScript(vw bsonrw.ValueWriter, arr []primitive.JavaScript) error { aw, err := vw.WriteArray() if err != nil { return err @@ -621,7 +620,7 @@ func (r *reflectionFreeDCodec) encodeSliceJavaScript(ec bsoncodec.EncodeContext, return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceMinKey(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.MinKey) error { +func (r *reflectionFreeDCodec) encodeSliceMinKey(vw bsonrw.ValueWriter, arr []primitive.MinKey) error { aw, err := vw.WriteArray() if err != nil { return err @@ -641,7 +640,7 @@ func (r *reflectionFreeDCodec) encodeSliceMinKey(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceMaxKey(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.MaxKey) error { +func (r *reflectionFreeDCodec) encodeSliceMaxKey(vw bsonrw.ValueWriter, arr []primitive.MaxKey) error { aw, err := vw.WriteArray() if err != nil { return err @@ -661,7 +660,7 @@ func (r *reflectionFreeDCodec) encodeSliceMaxKey(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceNull(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Null) error { +func (r *reflectionFreeDCodec) encodeSliceNull(vw bsonrw.ValueWriter, arr []primitive.Null) error { aw, err := vw.WriteArray() if err != nil { return err @@ -681,7 +680,7 @@ func (r *reflectionFreeDCodec) encodeSliceNull(ec bsoncodec.EncodeContext, vw bs return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceObjectID(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.ObjectID) error { +func (r *reflectionFreeDCodec) encodeSliceObjectID(vw bsonrw.ValueWriter, arr []primitive.ObjectID) error { aw, err := vw.WriteArray() if err != nil { return err @@ -701,7 +700,7 @@ func (r *reflectionFreeDCodec) encodeSliceObjectID(ec bsoncodec.EncodeContext, v return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceRegex(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Regex) error { +func (r *reflectionFreeDCodec) encodeSliceRegex(vw bsonrw.ValueWriter, arr []primitive.Regex) error { aw, err := vw.WriteArray() if err != nil { return err @@ -721,7 +720,7 @@ func (r *reflectionFreeDCodec) encodeSliceRegex(ec bsoncodec.EncodeContext, vw b return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceString(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []string) error { +func (r *reflectionFreeDCodec) encodeSliceString(vw bsonrw.ValueWriter, arr []string) error { aw, err := vw.WriteArray() if err != nil { return err @@ -741,7 +740,7 @@ func (r *reflectionFreeDCodec) encodeSliceString(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceSymbol(ec bsoncodec.EncodeContext, vw 
bsonrw.ValueWriter, arr []primitive.Symbol) error { +func (r *reflectionFreeDCodec) encodeSliceSymbol(vw bsonrw.ValueWriter, arr []primitive.Symbol) error { aw, err := vw.WriteArray() if err != nil { return err @@ -761,7 +760,7 @@ func (r *reflectionFreeDCodec) encodeSliceSymbol(ec bsoncodec.EncodeContext, vw return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceTimestamp(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Timestamp) error { +func (r *reflectionFreeDCodec) encodeSliceTimestamp(vw bsonrw.ValueWriter, arr []primitive.Timestamp) error { aw, err := vw.WriteArray() if err != nil { return err @@ -781,7 +780,7 @@ func (r *reflectionFreeDCodec) encodeSliceTimestamp(ec bsoncodec.EncodeContext, return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceUndefined(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.Undefined) error { +func (r *reflectionFreeDCodec) encodeSliceUndefined(vw bsonrw.ValueWriter, arr []primitive.Undefined) error { aw, err := vw.WriteArray() if err != nil { return err @@ -841,7 +840,7 @@ func (r *reflectionFreeDCodec) encodeSliceD(ec bsoncodec.EncodeContext, vw bsonr return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceInt(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []int) error { +func (r *reflectionFreeDCodec) encodeSliceInt(vw bsonrw.ValueWriter, arr []int) error { aw, err := vw.WriteArray() if err != nil { return err @@ -853,7 +852,7 @@ func (r *reflectionFreeDCodec) encodeSliceInt(ec bsoncodec.EncodeContext, vw bso return err } - if err := r.encodeInt(ec, arrayValWriter, val); err != nil { + if err := r.encodeInt(arrayValWriter, val); err != nil { return err } } @@ -861,7 +860,7 @@ func (r *reflectionFreeDCodec) encodeSliceInt(ec bsoncodec.EncodeContext, vw bso return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceInt8(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []int8) error { +func (r *reflectionFreeDCodec) encodeSliceInt8(vw bsonrw.ValueWriter, arr []int8) error { aw, err := vw.WriteArray() if err != nil { return err @@ -881,7 +880,7 @@ func (r *reflectionFreeDCodec) encodeSliceInt8(ec bsoncodec.EncodeContext, vw bs return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceInt16(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []int16) error { +func (r *reflectionFreeDCodec) encodeSliceInt16(vw bsonrw.ValueWriter, arr []int16) error { aw, err := vw.WriteArray() if err != nil { return err @@ -901,7 +900,7 @@ func (r *reflectionFreeDCodec) encodeSliceInt16(ec bsoncodec.EncodeContext, vw b return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceInt32(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []int32) error { +func (r *reflectionFreeDCodec) encodeSliceInt32(vw bsonrw.ValueWriter, arr []int32) error { aw, err := vw.WriteArray() if err != nil { return err @@ -961,7 +960,7 @@ func (r *reflectionFreeDCodec) encodeSliceUint(ec bsoncodec.EncodeContext, vw bs return aw.WriteArrayEnd() } -func (r *reflectionFreeDCodec) encodeSliceUint16(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []uint16) error { +func (r *reflectionFreeDCodec) encodeSliceUint16(vw bsonrw.ValueWriter, arr []uint16) error { aw, err := vw.WriteArray() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go index ac21df22b..7518bc042 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go +++ 
b/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go @@ -10,7 +10,7 @@ import ( var DefaultRegistry = NewRegistryBuilder().Build() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the +// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go index aa7e9e6b7..f66f6b240 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go @@ -30,33 +30,12 @@ type Val struct { primitive interface{} } -func (v Val) reset() Val { - v.primitive = nil // clear out any pointers so we don't accidentally stop them from being garbage collected. - v.t = bsontype.Type(0) - v.bootstrap[0] = 0x00 - v.bootstrap[1] = 0x00 - v.bootstrap[2] = 0x00 - v.bootstrap[3] = 0x00 - v.bootstrap[4] = 0x00 - v.bootstrap[5] = 0x00 - v.bootstrap[6] = 0x00 - v.bootstrap[7] = 0x00 - v.bootstrap[8] = 0x00 - v.bootstrap[9] = 0x00 - v.bootstrap[10] = 0x00 - v.bootstrap[11] = 0x00 - v.bootstrap[12] = 0x00 - v.bootstrap[13] = 0x00 - v.bootstrap[14] = 0x00 - return v -} - func (v Val) string() string { if v.primitive != nil { return v.primitive.(string) } // The string will either end with a null byte or it fills the entire bootstrap space. - length := uint8(v.bootstrap[0]) + length := v.bootstrap[0] return string(v.bootstrap[1 : length+1]) } @@ -190,7 +169,7 @@ func (v Val) MarshalAppendBSONValue(dst []byte) (bsontype.Type, []byte, error) { case bsontype.Boolean: dst = bsoncore.AppendBoolean(dst, v.Boolean()) case bsontype.DateTime: - dst = bsoncore.AppendDateTime(dst, int64(v.DateTime())) + dst = bsoncore.AppendDateTime(dst, v.DateTime()) case bsontype.Null: case bsontype.Regex: pattern, options := v.Regex() @@ -199,9 +178,9 @@ func (v Val) MarshalAppendBSONValue(dst []byte) (bsontype.Type, []byte, error) { ns, ptr := v.DBPointer() dst = bsoncore.AppendDBPointer(dst, ns, ptr) case bsontype.JavaScript: - dst = bsoncore.AppendJavaScript(dst, string(v.JavaScript())) + dst = bsoncore.AppendJavaScript(dst, v.JavaScript()) case bsontype.Symbol: - dst = bsoncore.AppendSymbol(dst, string(v.Symbol())) + dst = bsoncore.AppendSymbol(dst, v.Symbol()) case bsontype.CodeWithScope: code, doc := v.CodeWithScope() var scope []byte @@ -491,16 +470,12 @@ func (v Val) Undefined() { if v.t != bsontype.Undefined { panic(ElementTypeError{"bson.Value.Undefined", v.t}) } - return } // UndefinedOK is the same as Undefined, except it returns a boolean instead of // panicking. func (v Val) UndefinedOK() bool { - if v.t != bsontype.Undefined { - return false - } - return true + return v.t == bsontype.Undefined } // ObjectID returns the BSON ObjectID the Value represents. 
It panics if the value is a BSON type @@ -568,7 +543,7 @@ func (v Val) Time() time.Time { panic(ElementTypeError{"bson.Value.Time", v.t}) } i := v.i64() - return time.Unix(int64(i)/1000, int64(i)%1000*1000000) + return time.Unix(i/1000, i%1000*1000000) } // TimeOK is the same as Time, except it returns a boolean instead of @@ -578,7 +553,7 @@ func (v Val) TimeOK() (time.Time, bool) { return time.Time{}, false } i := v.i64() - return time.Unix(int64(i)/1000, int64(i)%1000*1000000), true + return time.Unix(i/1000, i%1000*1000000), true } // Null returns the BSON undefined the Value represents. It panics if the value is a BSON type @@ -587,7 +562,6 @@ func (v Val) Null() { if v.t != bsontype.Null && v.t != bsontype.Type(0) { panic(ElementTypeError{"bson.Value.Null", v.t}) } - return } // NullOK is the same as Null, except it returns a boolean instead of @@ -783,16 +757,12 @@ func (v Val) MinKey() { if v.t != bsontype.MinKey { panic(ElementTypeError{"bson.Value.MinKey", v.t}) } - return } // MinKeyOK is the same as MinKey, except it returns a boolean instead of // panicking. func (v Val) MinKeyOK() bool { - if v.t != bsontype.MinKey { - return false - } - return true + return v.t == bsontype.MinKey } // MaxKey returns the BSON maxkey the Value represents. It panics if the value is a BSON type @@ -801,16 +771,12 @@ func (v Val) MaxKey() { if v.t != bsontype.MaxKey { panic(ElementTypeError{"bson.Value.MaxKey", v.t}) } - return } // MaxKeyOK is the same as MaxKey, except it returns a boolean instead of // panicking. func (v Val) MaxKeyOK() bool { - if v.t != bsontype.MaxKey { - return false - } - return true + return v.t == bsontype.MaxKey } // Equal compares v to v2 and returns true if they are equal. Unknown BSON types are diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md index 9342cb073..2fde89f81 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md @@ -19,5 +19,5 @@ The `Operation` type handles executing a series of commands using a `Deployment` batch split write commands, such as insert. The type itself is heavily documented, so reading the code and comments together should provide an understanding of how the type works. -This type is not meant to be used directly by callers. Instead an wrapping type should be defined -using the IDL and an implementation generated using `operationgen`. +This type is not meant to be used directly by callers. Instead a wrapping type should be defined +using the IDL. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go index d04369044..d45dda61b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -80,7 +79,7 @@ func (ah *authHandshaker) GetHandshakeInformation(ctx context.Context, addr addr return ah.wrapped.GetHandshakeInformation(ctx, addr, conn) } - op := operation.NewIsMaster(). + op := operation.NewHello(). AppName(ah.options.AppName). Compressors(ah.options.Compressors). 
SASLSupportedMechs(ah.options.DBUser). @@ -145,14 +144,14 @@ func (ah *authHandshaker) FinishHandshake(ctx context.Context, conn driver.Conne } func (ah *authHandshaker) authenticate(ctx context.Context, cfg *Config) error { - // If the initial isMaster reply included a response to the speculative authentication attempt, we only need to + // If the initial hello reply included a response to the speculative authentication attempt, we only need to // conduct the remainder of the conversation. if speculativeResponse := ah.handshakeInfo.SpeculativeAuthenticate; speculativeResponse != nil { // Defensively ensure that the server did not include a response if speculative auth was not attempted. if ah.conversation == nil { return errors.New("speculative auth was not attempted but the server included a response") } - return ah.conversation.Finish(ctx, cfg, bsoncore.Document(speculativeResponse)) + return ah.conversation.Finish(ctx, cfg, speculativeResponse) } // If the server does not support speculative authentication or the first attempt was not successful, we need to diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go index fa0b0d0ec..eb0032ce3 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go @@ -76,7 +76,7 @@ func (ac *awsConversation) Step(challenge []byte) (response []byte, err error) { switch ac.state { case clientStarting: ac.state = clientFirst - response, err = ac.firstMsg() + response = ac.firstMsg() case clientFirst: ac.state = clientFinal response, err = ac.finalMsg(challenge) @@ -270,7 +270,7 @@ func (ac *awsConversation) getCredentials() (*awsv4.StaticProvider, error) { return creds, err } -func (ac *awsConversation) firstMsg() ([]byte, error) { +func (ac *awsConversation) firstMsg() []byte { // Values are cached for use in final message parameters ac.nonce = make([]byte, 32) _, _ = rand.Read(ac.nonce) @@ -279,7 +279,7 @@ func (ac *awsConversation) firstMsg() ([]byte, error) { msg = bsoncore.AppendInt32Element(msg, "p", 110) msg = bsoncore.AppendBinaryElement(msg, "r", 0x00, ac.nonce) msg, _ = bsoncore.AppendDocumentEnd(msg, idx) - return msg, nil + return msg } func (ac *awsConversation) finalMsg(s1 []byte) ([]byte, error) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go index 2d76e29e4..fe8f472c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go @@ -16,7 +16,7 @@ import ( // handshake. // // FirstMessage method returns the first message to be sent to the server. This message will be included in the initial -// isMaster command. +// hello command. // // Finish takes the server response to the initial message and conducts the remainder of the conversation to // authenticate the provided connection. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go index 4da4032f6..e266ad542 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go @@ -36,7 +36,7 @@ type DefaultAuthenticator struct { Cred *Cred // The authenticator to use for speculative authentication. 
Because the correct auth mechanism is unknown when doing - // the initial isMaster, SCRAM-SHA-256 is used for the speculative attempt. + // the initial hello, SCRAM-SHA-256 is used for the speculative attempt. speculativeAuthenticator SpeculativeAuthenticator } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go index 6e9f398d0..4b860ba63 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go @@ -4,8 +4,9 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -//+build gssapi -//+build windows linux darwin +//go:build gssapi && (windows || linux || darwin) +// +build gssapi +// +build windows linux darwin package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go index d88b764fb..50522cbb6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go @@ -4,7 +4,8 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -//+build !gssapi +//go:build !gssapi +// +build !gssapi package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go index 55caa2844..10312c228 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go @@ -4,7 +4,8 @@ // not use this file except in compliance with the License. 
You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -//+build gssapi,!windows,!linux,!darwin +//go:build gssapi && !windows && !linux && !darwin +// +build gssapi,!windows,!linux,!darwin package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go index 2649db459..ad820d8e9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go @@ -45,24 +45,24 @@ func (m mapRule) IsValid(value string) bool { return ok } -// whitelist is a generic rule for whitelisting -type whitelist struct { +// allowlist is a generic rule for allowlisting +type allowlist struct { rule } -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) +// IsValid for allowlist checks if the value is within the allowlist +func (a allowlist) IsValid(value string) bool { + return a.rule.IsValid(value) } -// blacklist is a generic rule for blacklisting -type blacklist struct { +// denylist is a generic rule for denylisting +type denylist struct { rule } -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) +// IsValid for allowlist checks if the value is within the allowlist +func (d denylist) IsValid(value string) bool { + return !d.rule.IsValid(value) } type patterns []string diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go index 1e4662b0f..23508c1f7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go @@ -41,7 +41,7 @@ const ( ) var ignoredHeaders = rules{ - blacklist{ + denylist{ mapRule{ authorizationHeader: struct{}{}, "User-Agent": struct{}{}, @@ -234,7 +234,7 @@ func (ctx *signingCtx) buildCredentialString() { } func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string + headers := make([]string, 0, len(header)) headers = append(headers, "host") for k, v := range header { if !r.IsValid(k) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go index 44faae4e9..abfa4db47 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go @@ -4,8 +4,9 @@ // not use this file except in compliance with the License. 
You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -//+build gssapi -//+build linux darwin +//go:build gssapi && (linux || darwin) +// +build gssapi +// +build linux darwin package gssapi diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go index f3c3c75cc..36e9633f8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go @@ -4,7 +4,8 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -//+build gssapi,windows +//go:build gssapi && windows +// +build gssapi,windows package gssapi diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go index c485aa128..8b81865e5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go @@ -60,7 +60,7 @@ func (a *awsSaslAdapter) Start() (string, []byte, error) { if err != nil { return MongoDBAWS, nil, err } - return MongoDBAWS, []byte(step), nil + return MongoDBAWS, step, nil } func (a *awsSaslAdapter) Next(challenge []byte) ([]byte, error) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go index d39772efc..6e2c2f4dc 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go @@ -8,11 +8,14 @@ package auth import ( "context" - "crypto/md5" "fmt" - "io" + // Ignore gosec warning "Blocklisted import crypto/md5: weak cryptographic primitive". We need + // to use MD5 here to implement the MONGODB-CR specification. + /* #nosec G501 */ + "crypto/md5" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -21,7 +24,8 @@ import ( // MONGODBCR is the mechanism name for MONGODB-CR. // -// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0. +// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in +// MongoDB 4.0. const MONGODBCR = "MONGODB-CR" func newMongoDBCRAuthenticator(cred *Cred) (Authenticator, error) { @@ -34,7 +38,8 @@ func newMongoDBCRAuthenticator(cred *Cred) (Authenticator, error) { // MongoDBCRAuthenticator uses the MONGODB-CR algorithm to authenticate a connection. // -// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0. +// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in +// MongoDB 4.0. type MongoDBCRAuthenticator struct { DB string Username string @@ -43,7 +48,8 @@ type MongoDBCRAuthenticator struct { // Auth authenticates the connection. // -// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0. +// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in +// MongoDB 4.0. 
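// Illustrative only: the gssapi-related files above replace legacy "// +build"
// constraints with the Go 1.17 "//go:build" syntax while keeping the old lines for
// pre-1.17 toolchains. Both forms must express the same constraint, as in gss.go:

//go:build gssapi && (linux || darwin)
// +build gssapi
// +build linux darwin

package gssapi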
func (a *MongoDBCRAuthenticator) Auth(ctx context.Context, cfg *Config) error { db := a.DB @@ -92,6 +98,9 @@ func (a *MongoDBCRAuthenticator) Auth(ctx context.Context, cfg *Config) error { } func (a *MongoDBCRAuthenticator) createKey(nonce string) string { + // Ignore gosec warning "Use of weak cryptographic primitive". We need to use MD5 here to + // implement the MONGODB-CR specification. + /* #nosec G401 */ h := md5.New() _, _ = io.WriteString(h, nonce) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go index b91a08f10..a7ae3368f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go @@ -73,7 +73,7 @@ func (sc *saslConversation) FirstMessage() (bsoncore.Document, error) { bsoncore.AppendBinaryElement(nil, "payload", 0x00, payload), } if sc.speculative { - // The "db" field is only appended for speculative auth because the isMaster command is executed against admin + // The "db" field is only appended for speculative auth because the hello command is executed against admin // so this is needed to tell the server the user's auth source. For a non-speculative attempt, the SASL commands // will be executed against the auth source. saslCmdElements = append(saslCmdElements, bsoncore.AppendStringElement(nil, "db", sc.source)) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go index 36b8c074d..a75a006d5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go @@ -7,14 +7,21 @@ package auth import ( - "crypto/md5" "fmt" "io" + + // Ignore gosec warning "Blocklisted import crypto/md5: weak cryptographic primitive". We need + // to use MD5 here to implement the SCRAM specification. + /* #nosec G501 */ + "crypto/md5" ) const defaultAuthDB = "admin" func mongoPasswordDigest(username, password string) string { + // Ignore gosec warning "Use of weak cryptographic primitive". We need to use MD5 here to + // implement the SCRAM specification. + /* #nosec G401 */ h := md5.New() _, _ = io.WriteString(h, username) _, _ = io.WriteString(h, ":mongo:") diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go index 4c3e4acfd..a25e35d36 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "strings" + "time" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" @@ -31,7 +32,7 @@ type BatchCursor struct { firstBatch bool cmdMonitor *event.CommandMonitor postBatchResumeToken bsoncore.Document - crypt *Crypt + crypt Crypt serverAPI *ServerAPIOptions // legacy server (< 3.2) fields @@ -129,7 +130,7 @@ type CursorOptions struct { MaxTimeMS int64 Limit int32 CommandMonitor *event.CommandMonitor - Crypt *Crypt + Crypt Crypt ServerAPI *ServerAPIOptions } @@ -183,6 +184,21 @@ func NewEmptyBatchCursor() *BatchCursor { return &BatchCursor{currentBatch: new(bsoncore.DocumentSequence)} } +// NewBatchCursorFromDocuments returns a batch cursor with current batch set to a sequence-style +// DocumentSequence containing the provided documents. 
+func NewBatchCursorFromDocuments(documents []byte) *BatchCursor { + return &BatchCursor{ + currentBatch: &bsoncore.DocumentSequence{ + Data: documents, + Style: bsoncore.SequenceStyle, + }, + // BatchCursors created with this function have no associated ID nor server, so no getMore + // calls will be made. + id: 0, + server: nil, + } +} + // ID returns the cursor ID for this batch cursor. func (bc *BatchCursor) ID() int64 { return bc.id @@ -345,7 +361,7 @@ func (bc *BatchCursor) getMore(ctx context.Context) { return nil } - bc.postBatchResumeToken = bsoncore.Document(pbrtDoc) + bc.postBatchResumeToken = pbrtDoc return nil }, @@ -380,7 +396,6 @@ func (bc *BatchCursor) getMore(ctx context.Context) { bc.err = err } } - return } // PostBatchResumeToken returns the latest seen post batch resume token. @@ -427,6 +442,11 @@ func (lbcd *loadBalancedCursorDeployment) Connection(_ context.Context) (Connect return lbcd.conn, nil } +// MinRTT always returns 0. It implements the driver.Server interface. +func (lbcd *loadBalancedCursorDeployment) MinRTT() time.Duration { + return 0 +} + func (lbcd *loadBalancedCursorDeployment) ProcessError(err error, conn Connection) ProcessErrorResult { return lbcd.errorProcessor.ProcessError(err, conn) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go index f376b6d35..4c25abd66 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go @@ -6,10 +6,6 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// this is the amount of reserved buffer space in a message that the -// driver reserves for command overhead. -const reservedCommandBufferBytes = 16 * 10 * 10 * 10 - // ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a // server is passed to an insert command. var ErrDocumentTooLarge = errors.New("an inserted document is too large") diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index 2e20a0558..57ce349be 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -9,6 +9,7 @@ package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstr import ( "errors" "fmt" + "math/rand" "net" "net/url" "strconv" @@ -16,11 +17,15 @@ import ( "time" "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/randutil" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/mongo/driver/dns" "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) +// random is a package-global pseudo-random number generator. +var random = randutil.NewLockedRand(rand.NewSource(randutil.CryptoSeed())) + // ParseAndValidate parses the provided URI into a ConnString object. // It check that all values are valid. 
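For the NewBatchCursorFromDocuments helper introduced in the batch_cursor.go hunk above: it wraps already-marshalled BSON documents in a cursor whose ID is 0 and whose server is nil, so no getMore is ever issued. A rough usage sketch, assuming the x/mongo/driver and bsoncore packages from this module; it is illustrative only:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
        "go.mongodb.org/mongo-driver/x/mongo/driver"
    )

    func main() {
        // Two pre-marshalled BSON documents, concatenated sequence-style.
        doc1 := bsoncore.NewDocumentBuilder().AppendInt32("x", 1).Build()
        doc2 := bsoncore.NewDocumentBuilder().AppendInt32("x", 2).Build()
        docs := append(append([]byte{}, doc1...), doc2...)

        // The resulting cursor is exhausted after its single batch.
        bc := driver.NewBatchCursorFromDocuments(docs)
        fmt.Println(bc.ID()) // 0: no getMore will be sent
    }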
func ParseAndValidate(s string) (ConnString, error) { @@ -80,6 +85,8 @@ type ConnString struct { MaxPoolSizeSet bool MinPoolSize uint64 MinPoolSizeSet bool + MaxConnecting uint64 + MaxConnectingSet bool Password string PasswordSet bool ReadConcernLevel string @@ -97,6 +104,8 @@ type ConnString struct { ServerSelectionTimeoutSet bool SocketTimeout time.Duration SocketTimeoutSet bool + SRVMaxHosts int + SRVServiceName string SSL bool SSLSet bool SSLClientCertificateKeyFile string @@ -254,14 +263,25 @@ func (p *parser) parse(original string) error { hosts = uri[:idx] } - var connectionArgsFromTXT []string parsedHosts := strings.Split(hosts, ",") + uri = uri[len(hosts):] + extractedDatabase, err := extractDatabaseFromURI(uri) + if err != nil { + return err + } + + uri = extractedDatabase.uri + p.Database = extractedDatabase.db + // grab connection arguments from URI + connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) + if err != nil { + return err + } + + // grab connection arguments from TXT record and enable SSL if "mongodb+srv://" + var connectionArgsFromTXT []string if p.Scheme == SchemeMongoDBSRV { - parsedHosts, err = p.dnsResolver.ParseHosts(hosts, true) - if err != nil { - return err - } connectionArgsFromTXT, err = p.dnsResolver.GetConnectionArgsFromTXT(hosts) if err != nil { return err @@ -272,35 +292,41 @@ func (p *parser) parse(original string) error { p.SSLSet = true } - for _, host := range parsedHosts { - err = p.addHost(host) + // add connection arguments from URI and TXT records to connstring + connectionArgPairs := append(connectionArgsFromTXT, connectionArgsFromQueryString...) + for _, pair := range connectionArgPairs { + err := p.addOption(pair) if err != nil { - return internal.WrapErrorf(err, "invalid host \"%s\"", host) + return err } } - if len(p.Hosts) == 0 { - return fmt.Errorf("must have at least 1 host") - } - uri = uri[len(hosts):] + // do SRV lookup if "mongodb+srv://" + if p.Scheme == SchemeMongoDBSRV { + parsedHosts, err = p.dnsResolver.ParseHosts(hosts, p.SRVServiceName, true) + if err != nil { + return err + } - extractedDatabase, err := extractDatabaseFromURI(uri) - if err != nil { - return err + // If p.SRVMaxHosts is non-zero and is less than the number of hosts, randomly + // select SRVMaxHosts hosts from parsedHosts. + if p.SRVMaxHosts > 0 && p.SRVMaxHosts < len(parsedHosts) { + random.Shuffle(len(parsedHosts), func(i, j int) { + parsedHosts[i], parsedHosts[j] = parsedHosts[j], parsedHosts[i] + }) + parsedHosts = parsedHosts[:p.SRVMaxHosts] + } } - uri = extractedDatabase.uri - p.Database = extractedDatabase.db - - connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) - connectionArgPairs := append(connectionArgsFromTXT, connectionArgsFromQueryString...) - - for _, pair := range connectionArgPairs { - err = p.addOption(pair) + for _, host := range parsedHosts { + err = p.addHost(host) if err != nil { - return err + return internal.WrapErrorf(err, "invalid host \"%s\"", host) } } + if len(p.Hosts) == 0 { + return fmt.Errorf("must have at least 1 host") + } err = p.setDefaultAuthParams(extractedDatabase.db) if err != nil { @@ -355,6 +381,16 @@ func (p *parser) validate() error { } } + // Check for invalid use of SRVMaxHosts. 
+ if p.SRVMaxHosts > 0 { + if p.ReplicaSet != "" { + return internal.ErrSRVMaxHostsWithReplicaSet + } + if p.LoadBalanced { + return internal.ErrSRVMaxHostsWithLoadBalanced + } + } + return nil } @@ -697,13 +733,22 @@ func (p *parser) addOption(pair string) error { } p.MinPoolSize = uint64(n) p.MinPoolSizeSet = true + case "maxconnecting": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + p.MaxConnecting = uint64(n) + p.MaxConnectingSet = true case "readconcernlevel": p.ReadConcernLevel = value case "readpreference": p.ReadPreference = value case "readpreferencetags": if value == "" { - // for when readPreferenceTags= at end of URI + // If "readPreferenceTags=" is supplied, append an empty map to tag sets to + // represent a wild-card. + p.ReadPreferenceTagSets = append(p.ReadPreferenceTagSets, map[string]string{}) break } @@ -762,6 +807,31 @@ func (p *parser) addOption(pair string) error { } p.SocketTimeout = time.Duration(n) * time.Millisecond p.SocketTimeoutSet = true + case "srvmaxhosts": + // srvMaxHosts can only be set on URIs with the "mongodb+srv" scheme + if p.Scheme != SchemeMongoDBSRV { + return fmt.Errorf("cannot specify srvMaxHosts on non-SRV URI") + } + + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + p.SRVMaxHosts = n + case "srvservicename": + // srvServiceName can only be set on URIs with the "mongodb+srv" scheme + if p.Scheme != SchemeMongoDBSRV { + return fmt.Errorf("cannot specify srvServiceName on non-SRV URI") + } + + // srvServiceName must be between 1 and 62 characters according to + // our specification. Empty service names are not valid, and the service + // name (including prepended underscore) should not exceed the 63 character + // limit for DNS query subdomains. + if len(value) < 1 || len(value) > 62 { + return fmt.Errorf("srvServiceName value must be between 1 and 62 characters") + } + p.SRVServiceName = value case "ssl", "tls": switch value { case "true": diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go index 812db32fe..b06e05812 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go @@ -40,27 +40,52 @@ type CryptOptions struct { MarkFn MarkCommandFn KmsProviders bsoncore.Document SchemaMap map[string]bsoncore.Document + TLSConfig map[string]*tls.Config BypassAutoEncryption bool } -// Crypt consumes the libmongocrypt.MongoCrypt type to iterate the mongocrypt state machine and perform encryption +// Crypt is an interface implemented by types that can encrypt and decrypt instances of +// bsoncore.Document. +// +// Users should rely on the driver's crypt type (used by default) for encryption and decryption +// unless they are perfectly confident in another implementation of Crypt. +type Crypt interface { + // Encrypt encrypts the given command. + Encrypt(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error) + // Decrypt decrypts the given command response. + Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bsoncore.Document, error) + // CreateDataKey creates a data key using the given KMS provider and options. + CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error) + // EncryptExplicit encrypts the given value with the given options. 
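The connstring changes above add the srvMaxHosts and srvServiceName URI options: both are restricted to the mongodb+srv scheme, srvMaxHosts randomly trims the SRV seedlist and is rejected together with replicaSet or loadBalanced=true, and the service name must be 1-62 characters. A rough illustration of a URI exercising them; the host is hypothetical, and parsing will fail without a real SRV record to resolve:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
    )

    func main() {
        // srvMaxHosts caps the randomly selected SRV hosts; srvServiceName
        // overrides the default "mongodb" SRV service name.
        uri := "mongodb+srv://cluster0.example.com/?srvMaxHosts=2&srvServiceName=customname"

        cs, err := connstring.ParseAndValidate(uri)
        if err != nil {
            fmt.Println("parse error:", err) // expected without a resolvable SRV record
            return
        }
        fmt.Println(cs.SRVMaxHosts, cs.SRVServiceName)
    }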
+ EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error) + // DecryptExplicit decrypts the given encrypted value. + DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) + // Close cleans up any resources associated with the Crypt instance. + Close() + // BypassAutoEncryption returns true if auto-encryption should be bypassed. + BypassAutoEncryption() bool +} + +// crypt consumes the libmongocrypt.MongoCrypt type to iterate the mongocrypt state machine and perform encryption // and decryption. -type Crypt struct { +type crypt struct { mongoCrypt *mongocrypt.MongoCrypt collInfoFn CollectionInfoFn keyFn KeyRetrieverFn markFn MarkCommandFn + tlsConfig map[string]*tls.Config - BypassAutoEncryption bool + bypassAutoEncryption bool } // NewCrypt creates a new Crypt instance configured with the given AutoEncryptionOptions. -func NewCrypt(opts *CryptOptions) (*Crypt, error) { - c := &Crypt{ +func NewCrypt(opts *CryptOptions) (Crypt, error) { + c := &crypt{ collInfoFn: opts.CollInfoFn, keyFn: opts.KeyFn, markFn: opts.MarkFn, - BypassAutoEncryption: opts.BypassAutoEncryption, + tlsConfig: opts.TLSConfig, + bypassAutoEncryption: opts.BypassAutoEncryption, } mongocryptOpts := options.MongoCrypt().SetKmsProviders(opts.KmsProviders).SetLocalSchemaMap(opts.SchemaMap) @@ -74,8 +99,8 @@ func NewCrypt(opts *CryptOptions) (*Crypt, error) { } // Encrypt encrypts the given command. -func (c *Crypt) Encrypt(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error) { - if c.BypassAutoEncryption { +func (c *crypt) Encrypt(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error) { + if c.bypassAutoEncryption { return cmd, nil } @@ -89,7 +114,7 @@ func (c *Crypt) Encrypt(ctx context.Context, db string, cmd bsoncore.Document) ( } // Decrypt decrypts the given command response. -func (c *Crypt) Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bsoncore.Document, error) { +func (c *crypt) Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bsoncore.Document, error) { cryptCtx, err := c.mongoCrypt.CreateDecryptionContext(cmdResponse) if err != nil { return nil, err @@ -100,7 +125,7 @@ func (c *Crypt) Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bso } // CreateDataKey creates a data key using the given KMS provider and options. -func (c *Crypt) CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error) { +func (c *crypt) CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error) { cryptCtx, err := c.mongoCrypt.CreateDataKeyContext(kmsProvider, opts) if err != nil { return nil, err @@ -111,7 +136,7 @@ func (c *Crypt) CreateDataKey(ctx context.Context, kmsProvider string, opts *opt } // EncryptExplicit encrypts the given value with the given options. -func (c *Crypt) EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error) { +func (c *crypt) EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error) { idx, doc := bsoncore.AppendDocumentStart(nil) doc = bsoncore.AppendValueElement(doc, "v", val) doc, _ = bsoncore.AppendDocumentEnd(doc, idx) @@ -132,7 +157,7 @@ func (c *Crypt) EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *o } // DecryptExplicit decrypts the given encrypted value. 
-func (c *Crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) { +func (c *crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) { idx, doc := bsoncore.AppendDocumentStart(nil) doc = bsoncore.AppendBinaryElement(doc, "v", subtype, data) doc, _ = bsoncore.AppendDocumentEnd(doc, idx) @@ -152,11 +177,15 @@ func (c *Crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) } // Close cleans up any resources associated with the Crypt instance. -func (c *Crypt) Close() { +func (c *crypt) Close() { c.mongoCrypt.Close() } -func (c *Crypt) executeStateMachine(ctx context.Context, cryptCtx *mongocrypt.Context, db string) (bsoncore.Document, error) { +func (c *crypt) BypassAutoEncryption() bool { + return c.bypassAutoEncryption +} + +func (c *crypt) executeStateMachine(ctx context.Context, cryptCtx *mongocrypt.Context, db string) (bsoncore.Document, error) { var err error for { state := cryptCtx.State() @@ -168,7 +197,7 @@ func (c *Crypt) executeStateMachine(ctx context.Context, cryptCtx *mongocrypt.Co case mongocrypt.NeedMongoKeys: err = c.retrieveKeys(ctx, cryptCtx) case mongocrypt.NeedKms: - err = c.decryptKeys(ctx, cryptCtx) + err = c.decryptKeys(cryptCtx) case mongocrypt.Ready: return cryptCtx.Finish() default: @@ -180,7 +209,7 @@ func (c *Crypt) executeStateMachine(ctx context.Context, cryptCtx *mongocrypt.Co } } -func (c *Crypt) collectionInfo(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error { +func (c *crypt) collectionInfo(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error { op, err := cryptCtx.NextOperation() if err != nil { return err @@ -199,7 +228,7 @@ func (c *Crypt) collectionInfo(ctx context.Context, cryptCtx *mongocrypt.Context return cryptCtx.CompleteOperation() } -func (c *Crypt) markCommand(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error { +func (c *crypt) markCommand(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error { op, err := cryptCtx.NextOperation() if err != nil { return err @@ -216,7 +245,7 @@ func (c *Crypt) markCommand(ctx context.Context, cryptCtx *mongocrypt.Context, d return cryptCtx.CompleteOperation() } -func (c *Crypt) retrieveKeys(ctx context.Context, cryptCtx *mongocrypt.Context) error { +func (c *crypt) retrieveKeys(ctx context.Context, cryptCtx *mongocrypt.Context) error { op, err := cryptCtx.NextOperation() if err != nil { return err @@ -236,14 +265,14 @@ func (c *Crypt) retrieveKeys(ctx context.Context, cryptCtx *mongocrypt.Context) return cryptCtx.CompleteOperation() } -func (c *Crypt) decryptKeys(ctx context.Context, cryptCtx *mongocrypt.Context) error { +func (c *crypt) decryptKeys(cryptCtx *mongocrypt.Context) error { for { kmsCtx := cryptCtx.NextKmsContext() if kmsCtx == nil { break } - if err := c.decryptKey(ctx, kmsCtx); err != nil { + if err := c.decryptKey(kmsCtx); err != nil { return err } } @@ -251,7 +280,7 @@ func (c *Crypt) decryptKeys(ctx context.Context, cryptCtx *mongocrypt.Context) e return cryptCtx.FinishKmsContexts() } -func (c *Crypt) decryptKey(ctx context.Context, kmsCtx *mongocrypt.KmsContext) error { +func (c *crypt) decryptKey(kmsCtx *mongocrypt.KmsContext) error { host, err := kmsCtx.HostName() if err != nil { return err @@ -267,7 +296,12 @@ func (c *Crypt) decryptKey(ctx context.Context, kmsCtx *mongocrypt.KmsContext) e addr = fmt.Sprintf("%s:%d", host, defaultKmsPort) } - conn, err := tls.Dial("tcp", addr, &tls.Config{}) + kmsProvider := kmsCtx.KMSProvider() + 
tlsCfg := c.tlsConfig[kmsProvider] + if tlsCfg == nil { + tlsCfg = &tls.Config{MinVersion: tls.VersionTLS12} + } + conn, err := tls.Dial("tcp", addr, tlsCfg) if err != nil { return err } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index f71337d14..16268b593 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -24,14 +24,14 @@ type Resolver struct { // DefaultResolver is a Resolver that uses the default Resolver from the net package. var DefaultResolver = &Resolver{net.LookupSRV, net.LookupTXT} -// ParseHosts uses the srv string to get the hosts. -func (r *Resolver) ParseHosts(host string, stopOnErr bool) ([]string, error) { +// ParseHosts uses the srv string and service name to get the hosts. +func (r *Resolver) ParseHosts(host string, srvName string, stopOnErr bool) ([]string, error) { parsedHosts := strings.Split(host, ",") if len(parsedHosts) != 1 { return nil, fmt.Errorf("URI with SRV must include one and only one hostname") } - return r.fetchSeedlistFromSRV(parsedHosts[0], stopOnErr) + return r.fetchSeedlistFromSRV(parsedHosts[0], srvName, stopOnErr) } // GetConnectionArgsFromTXT gets the TXT record associated with the host and returns the connection arguments. @@ -64,7 +64,7 @@ func (r *Resolver) GetConnectionArgsFromTXT(host string) ([]string, error) { return connectionArgsFromTXT, nil } -func (r *Resolver) fetchSeedlistFromSRV(host string, stopOnErr bool) ([]string, error) { +func (r *Resolver) fetchSeedlistFromSRV(host string, srvName string, stopOnErr bool) ([]string, error) { var err error _, _, err = net.SplitHostPort(host) @@ -75,14 +75,18 @@ func (r *Resolver) fetchSeedlistFromSRV(host string, stopOnErr bool) ([]string, return nil, fmt.Errorf("URI with srv must not include a port number") } - _, addresses, err := r.LookupSRV("mongodb", "tcp", host) + // default to "mongodb" as service name if not supplied + if srvName == "" { + srvName = "mongodb" + } + _, addresses, err := r.LookupSRV(srvName, "tcp", host) if err != nil { return nil, err } trimmedHost := strings.TrimSuffix(host, ".") - var parsedHosts []string + parsedHosts := make([]string, 0, len(addresses)) for _, address := range addresses { trimmedAddressTarget := strings.TrimSuffix(address.Target, ".") err := validateSRVResult(trimmedAddressTarget, trimmedHost) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 6ba7c7707..9a2ccda7a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -2,6 +2,7 @@ package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver" import ( "context" + "time" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" @@ -43,6 +44,9 @@ type Subscriber interface { // retrieving and returning of connections. type Server interface { Connection(context.Context) (Connection, error) + + // MinRTT returns the minimum round-trip time to the server observed over the window period. + MinRTT() time.Duration } // Connection represents a connection to a MongoDB server. 
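The decryptKey change above picks a TLS configuration per KMS provider and otherwise falls back to a config that enforces TLS 1.2 as the minimum version, instead of the previous empty tls.Config. The same lookup-with-fallback in isolation, using hypothetical names:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    // kmsTLSConfig returns the TLS config registered for a KMS provider, or a
    // default enforcing TLS 1.2+ when the provider has none configured.
    func kmsTLSConfig(cfgs map[string]*tls.Config, provider string) *tls.Config {
        if cfg := cfgs[provider]; cfg != nil {
            return cfg
        }
        return &tls.Config{MinVersion: tls.VersionTLS12}
    }

    func main() {
        cfgs := map[string]*tls.Config{
            "kmip": &tls.Config{MinVersion: tls.VersionTLS13},
        }
        fmt.Println(kmsTLSConfig(cfgs, "kmip").MinVersion == tls.VersionTLS13) // true
        fmt.Println(kmsTLSConfig(cfgs, "aws").MinVersion == tls.VersionTLS12)  // true
    }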
@@ -50,8 +54,14 @@ type Connection interface { WriteWireMessage(context.Context, []byte) error ReadWireMessage(ctx context.Context, dst []byte) ([]byte, error) Description() description.Server + + // Close closes any underlying connection and returns or frees any resources held by the + // connection. Close is idempotent and can be called multiple times, although subsequent calls + // to Close may return an error. A connection cannot be used after it is closed. Close() error + ID() string + ServerConnectionID() *int32 Address() address.Address Stale() bool } @@ -137,11 +147,14 @@ type ErrorProcessor interface { } // HandshakeInformation contains information extracted from a MongoDB connection handshake. This is a helper type that -// augments description.Server by also tracking authentication-related fields. We use this type rather than adding -// these fields to description.Server to avoid retaining sensitive information in a user-facing type. +// augments description.Server by also tracking server connection ID and authentication-related fields. We use this type +// rather than adding authentication-related fields to description.Server to avoid retaining sensitive information in a +// user-facing type. The server connection ID is stored in this type because unlike description.Server, all handshakes are +// correlated with a single network connection. type HandshakeInformation struct { Description description.Server SpeculativeAuthenticate bsoncore.Document + ServerConnectionID *int32 SaslSupportedMechs []string } @@ -190,6 +203,11 @@ func (ssd SingleConnectionDeployment) Connection(context.Context) (Connection, e return ssd.C, nil } +// MinRTT always returns 0. It implements the driver.Server interface. +func (ssd SingleConnectionDeployment) MinRTT() time.Duration { + return 0 +} + // TODO(GODRIVER-617): We can likely use 1 type for both the Type and the RetryMode by using // 2 bits for the mode and 1 bit for the type. Although in the practical sense, we might not want to // do that since the type of retryability is tied to the operation itself and isn't going change, diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go index 23c94a8b1..fd1e2ad1a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go @@ -7,6 +7,7 @@ import ( "strings" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) @@ -14,7 +15,7 @@ import ( var ( retryableCodes = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262} nodeIsRecoveringCodes = []int32{11600, 11602, 13436, 189, 91} - notMasterCodes = []int32{10107, 13435, 10058} + notPrimaryCodes = []int32{10107, 13435, 10058} nodeIsShuttingDownCodes = []int32{11600, 91} unknownReplWriteConcernCode = int32(79) @@ -73,7 +74,7 @@ func (e ResponseError) Error() string { if e.Wrapped != nil { return fmt.Sprintf("%s: %s", e.Message, e.Wrapped) } - return fmt.Sprintf("%s", e.Message) + return e.Message } // WriteCommandError is an error for a write command. 
@@ -81,6 +82,7 @@ type WriteCommandError struct { WriteConcernError *WriteConcernError WriteErrors WriteErrors Labels []string + Raw bsoncore.Document } // UnsupportedStorageEngine returns whether or not the WriteCommandError comes from a retryable write being attempted @@ -128,6 +130,7 @@ type WriteConcernError struct { Details bsoncore.Document Labels []string TopologyVersion *description.TopologyVersion + Raw bsoncore.Document } func (wce WriteConcernError) Error() string { @@ -170,15 +173,15 @@ func (wce WriteConcernError) NodeIsShuttingDown() bool { return hasNoCode && strings.Contains(wce.Message, "node is shutting down") } -// NotMaster returns true if this error is a not master error. -func (wce WriteConcernError) NotMaster() bool { - for _, code := range notMasterCodes { +// NotPrimary returns true if this error is a not primary error. +func (wce WriteConcernError) NotPrimary() bool { + for _, code := range notPrimaryCodes { if wce.Code == int64(code) { return true } } hasNoCode := wce.Code == 0 - return hasNoCode && strings.Contains(wce.Message, "not master") + return hasNoCode && strings.Contains(wce.Message, internal.LegacyNotPrimary) } // WriteError is a non-write concern failure that occurred as a result of a write @@ -188,6 +191,7 @@ type WriteError struct { Code int64 Message string Details bsoncore.Document + Raw bsoncore.Document } func (we WriteError) Error() string { return we.Message } @@ -217,6 +221,7 @@ type Error struct { Name string Wrapped error TopologyVersion *description.TopologyVersion + Raw bsoncore.Document } // UnsupportedStorageEngine returns whether e came as a result of an unsupported storage engine @@ -316,15 +321,15 @@ func (e Error) NodeIsShuttingDown() bool { return hasNoCode && strings.Contains(e.Message, "node is shutting down") } -// NotMaster returns true if this error is a not master error. -func (e Error) NotMaster() bool { - for _, code := range notMasterCodes { +// NotPrimary returns true if this error is a not primary error. +func (e Error) NotPrimary() bool { + for _, code := range notPrimaryCodes { if e.Code == code { return true } } hasNoCode := e.Code == 0 - return hasNoCode && strings.Contains(e.Message, "not master") + return hasNoCode && strings.Contains(e.Message, internal.LegacyNotPrimary) } // NamespaceNotFound returns true if this errors is a NamespaceNotFound error. 
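The errors.go hunk above renames the NotMaster helpers to NotPrimary and matches code-less errors against internal.LegacyNotPrimary instead of the literal "not master" string. A small sketch of classifying a server error through the renamed helper, assuming the x/mongo/driver package from this module:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/x/mongo/driver"
    )

    // isNotPrimary reports whether err is a "not primary" server error, either
    // by one of the known error codes or, for code-less errors, by message.
    func isNotPrimary(err error) bool {
        if de, ok := err.(driver.Error); ok {
            return de.NotPrimary()
        }
        return false
    }

    func main() {
        err := driver.Error{Code: 10107, Message: "not primary"}
        fmt.Println(isNotPrimary(err)) // true: 10107 is a not-primary code
    }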
@@ -416,6 +421,7 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { we.Details = make([]byte, len(info)) copy(we.Details, info) } + we.Raw = doc wcError.WriteErrors = append(wcError.WriteErrors, we) } case "writeConcernError": @@ -424,6 +430,7 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { break } wcError.WriteConcernError = new(WriteConcernError) + wcError.WriteConcernError.Raw = doc if code, exists := doc.Lookup("code").AsInt64OK(); exists { wcError.WriteConcernError.Code = code } @@ -471,6 +478,7 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { Name: codeName, Labels: labels, TopologyVersion: tv, + Raw: doc, } } @@ -479,6 +487,7 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { if wcError.WriteConcernError != nil { wcError.WriteConcernError.TopologyVersion = tv } + wcError.Raw = doc return wcError } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go index 3794ee5c9..9e887375a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build cse // +build cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go index 8c235c5bf..3401e7384 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build cse // +build cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go index 816099abc..706a0f9e7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build !cse // +build !cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go index edfedf1a3..ba547ad49 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. 
You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build cse // +build cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go index dc0dd5145..a53801965 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build cse // +build cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go index b0ff16892..624888a2f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build !cse // +build !cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go index 77ce8308b..296a22315 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build cse // +build cse package mongocrypt @@ -32,6 +33,12 @@ func (kc *KmsContext) HostName() (string, error) { return C.GoString(hostname), nil } +// KMSProvider gets the KMS provider of the KMS context. +func (kc *KmsContext) KMSProvider() string { + kmsProvider := C.mongocrypt_kms_ctx_get_kms_provider(kc.wrapped, nil) + return C.GoString(kmsProvider) +} + // Message returns the message to send to the KMS. func (kc *KmsContext) Message() ([]byte, error) { msgBinary := newBinary() diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go index 8026a1902..272367ea5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build !cse // +build !cse package mongocrypt @@ -21,6 +22,11 @@ func (kc *KmsContext) Message() ([]byte, error) { panic(cseNotSupportedMsg) } +// KMSProvider gets the KMS provider of the KMS context. +func (kc *KmsContext) KMSProvider() string { + panic(cseNotSupportedMsg) +} + // BytesNeeded returns the number of bytes that should be received from the KMS. 
// After sending the message to the KMS, this message should be called in a loop until the number returned is 0. func (kc *KmsContext) BytesNeeded() int32 { @@ -31,8 +37,3 @@ func (kc *KmsContext) BytesNeeded() int32 { func (kc *KmsContext) FeedResponse(response []byte) error { panic(cseNotSupportedMsg) } - -// createErrorFromStatus creates a new Error from the status of the KmsContext instance. -func (kc *KmsContext) createErrorFromStatus() error { - panic(cseNotSupportedMsg) -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index d91cc6746..92f49976d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build !cse // +build !cse package mongocrypt diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 764dcb439..ed625706b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -18,7 +18,6 @@ import ( "io/ioutil" "math/big" "net/http" - "net/url" "time" "golang.org/x/crypto/ocsp" @@ -28,9 +27,6 @@ import ( var ( tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} mustStapleFeatureValue = big.NewInt(5) - - defaultRequestTimeout = 5 * time.Second - errGotOCSPResponse = errors.New("done") ) // Error represents an OCSP verification error @@ -126,10 +122,7 @@ func getParsedResponse(ctx context.Context, cfg config, connState tls.Connection if cfg.disableEndpointChecking { return nil, nil } - externalResponse, err := contactResponders(ctx, cfg) - if err != nil { - return nil, err - } + externalResponse := contactResponders(ctx, cfg) if externalResponse == nil { // None of the responders were available. return nil, nil @@ -210,33 +203,21 @@ func isMustStapleCertificate(cert *x509.Certificate) (bool, error) { return false, nil } -// contactResponders will send a request to the OCSP responders reported by cfg.serverCert. The first response that -// conclusively identifies cfg.serverCert as good or revoked will be returned. If all responders are unavailable or no -// responder returns a conclusive status, (nil, nil) will be returned. -func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error) { +// contactResponders will send a request to all OCSP responders reported by cfg.serverCert. The +// first response that conclusively identifies cfg.serverCert as good or revoked will be returned. +// If all responders are unavailable or no responder returns a conclusive status, it returns nil. +// contactResponders will wait for up to 5 seconds to get a certificate status response. +func contactResponders(ctx context.Context, cfg config) *ResponseDetails { if len(cfg.serverCert.OCSPServer) == 0 { - return nil, nil + return nil } - requestCtx := ctx // Either ctx or a new context derived from ctx with a five second timeout. - userContextUsed := true - var cancelFn context.CancelFunc - - // Use a context with defaultRequestTimeout if ctx does not have a deadline set or the current deadline is further - // out than defaultRequestTimeout. 
If the current deadline is less than less than defaultRequestTimeout out, respect - // it. Calling context.WithTimeout would do this for us, but we need to know which context we're using. - wantDeadline := time.Now().Add(defaultRequestTimeout) - if deadline, ok := ctx.Deadline(); !ok || deadline.After(wantDeadline) { - userContextUsed = false - requestCtx, cancelFn = context.WithDeadline(ctx, wantDeadline) - } - defer func() { - if cancelFn != nil { - cancelFn() - } - }() + // Limit all OCSP responder calls to a maximum of 5 seconds or when the passed-in context expires, + // whichever happens first. + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() - group, groupCtx := errgroup.WithContext(requestCtx) + group, ctx := errgroup.WithContext(ctx) ocspResponses := make(chan *ocsp.Response, len(cfg.serverCert.OCSPServer)) defer close(ocspResponses) @@ -244,6 +225,11 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error // Re-assign endpoint so it gets re-scoped rather than using the iteration variable in the goroutine. See // https://golang.org/doc/faq#closures_and_goroutines. endpoint := endpoint + + // Start a group of goroutines that each attempt to request the certificate status from one + // of the OCSP endpoints listed in the server certificate. We want to "soft fail" on all + // errors, so this function never returns actual errors. Only a "done" error is returned + // when a response is received so the errgroup cancels any other in-progress requests. group.Go(func() error { // Use bytes.NewReader instead of bytes.NewBuffer because a bytes.Buffer is an owning representation and the // docs recommend not using the underlying []byte after creating the buffer, so a new copy of the request @@ -252,35 +238,16 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error if err != nil { return nil } - request = request.WithContext(groupCtx) - - // Execute the request and handle errors as follows: - // - // 1. If the original context expired or was cancelled, propagate the error up so the caller will abort the - // verification and return control to the user. - // - // 2. If any other errors occurred, including the defaultRequestTimeout expiring, or the response has a - // non-200 status code, suppress the error because we want to ignore this responder and wait for a different - // one to responsd. + request = request.WithContext(ctx) + httpResponse, err := http.DefaultClient.Do(request) if err != nil { - urlErr, ok := err.(*url.Error) - if !ok { - return nil - } - - timeout := urlErr.Timeout() - cancelled := urlErr.Err == context.Canceled // Timeout() does not return true for context.Cancelled. - if cancelled || (userContextUsed && timeout) { - // Handle the original context expiring or being cancelled. The url.Error type supports Unwrap, so - // users can use errors.Is to check for context errors. - return err - } - return nil // Ignore all other errors. + return nil } defer func() { _ = httpResponse.Body.Close() }() + if httpResponse.StatusCode != 200 { return nil } @@ -292,26 +259,27 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error ocspResponse, err := ocsp.ParseResponseForCert(httpBytes, cfg.serverCert, cfg.issuer) if err != nil || verifyResponse(cfg, ocspResponse) != nil || ocspResponse.Status == ocsp.Unknown { - // If there was an error parsing/validating the response or the response was inconclusive, suppress - // the error because we want to ignore this responder. 
+ // If there was an error parsing/validating the response or the response was + // inconclusive, suppress the error because we want to ignore this responder. return nil } - // Store the response and return a sentinel error so the error group will exit and any in-flight requests - // will be cancelled. + // Send the conclusive response on the response channel and return a "done" error that + // will cause the errgroup to cancel all other in-progress requests. ocspResponses <- ocspResponse - return errGotOCSPResponse + return errors.New("done") }) } - if err := group.Wait(); err != nil && err != errGotOCSPResponse { - return nil, err - } - if len(ocspResponses) == 0 { - // None of the responders gave a conclusive response. - return nil, nil + _ = group.Wait() + select { + case res := <-ocspResponses: + return extractResponseDetails(res) + default: + // If there is no OCSP response on the response channel, all OCSP calls either failed or + // were inconclusive. Return nil. + return nil } - return extractResponseDetails(<-ocspResponses), nil } // verifyResponse checks that the provided OCSP response is valid. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index 5c2593bd7..f1647beff 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -46,6 +47,11 @@ const ( readSnapshotMinWireVersion int32 = 13 ) +// RetryablePoolError is a connection pool error that can be retried while executing an operation. +type RetryablePoolError interface { + Retryable() bool +} + // InvalidOperationError is returned from Validate and indicates that a required field is missing // from an instance of Operation. type InvalidOperationError struct{ MissingField string } @@ -72,20 +78,22 @@ type startedInformation struct { cmdName string documentSequenceIncluded bool connID string + serverConnID *int32 redacted bool serviceID *primitive.ObjectID } // finishedInformation keeps track of all of the information necessary for monitoring success and failure events. type finishedInformation struct { - cmdName string - requestID int32 - response bsoncore.Document - cmdErr error - connID string - startTime time.Time - redacted bool - serviceID *primitive.ObjectID + cmdName string + requestID int32 + response bsoncore.Document + cmdErr error + connID string + serverConnID *int32 + startTime time.Time + redacted bool + serviceID *primitive.ObjectID } // ResponseInfo contains the context required to parse a server response. @@ -194,18 +202,22 @@ type Operation struct { CommandMonitor *event.CommandMonitor // Crypt specifies a Crypt object to use for automatic client side encryption and decryption. - Crypt *Crypt + Crypt Crypt // ServerAPI specifies options used to configure the API version sent to the server. ServerAPI *ServerAPIOptions + // IsOutputAggregate specifies whether this operation is an aggregate with an output stage. If true, + // read preference will not be added to the command on wire versions < 13. 
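The rewritten contactResponders above fans out to every OCSP responder concurrently under a hard five-second cap, soft-fails on any individual error, and returns the first conclusive response by having that goroutine return a sentinel "done" error so the errgroup cancels the rest. The same fan-out pattern reduced to a generic sketch; the endpoints and query function here are placeholders, not driver code:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "golang.org/x/sync/errgroup"
    )

    // firstConclusive queries every endpoint concurrently, ignores individual
    // failures, and cancels the remaining requests once one endpoint returns a
    // conclusive (non-empty) answer. It returns "" if none did.
    func firstConclusive(ctx context.Context, endpoints []string,
        query func(context.Context, string) (string, error)) string {

        ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
        defer cancel()

        group, ctx := errgroup.WithContext(ctx)
        results := make(chan string, len(endpoints))
        defer close(results)

        for _, endpoint := range endpoints {
            endpoint := endpoint // re-scope for the goroutine
            group.Go(func() error {
                res, err := query(ctx, endpoint)
                if err != nil || res == "" {
                    return nil // soft fail: ignore this endpoint
                }
                results <- res
                return errors.New("done") // cancels in-progress requests
            })
        }

        _ = group.Wait()
        select {
        case res := <-results:
            return res
        default:
            return ""
        }
    }

    func main() {
        answer := firstConclusive(context.Background(), []string{"a", "b"},
            func(_ context.Context, ep string) (string, error) {
                if ep == "b" {
                    return "good", nil
                }
                return "", errors.New("unavailable")
            })
        fmt.Println(answer) // "good"
    }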
+ IsOutputAggregate bool + // cmdName is only set when serializing OP_MSG and is used internally in readWireMessage. cmdName string } // shouldEncrypt returns true if this operation should automatically be encrypted. func (op Operation) shouldEncrypt() bool { - return op.Crypt != nil && !op.Crypt.BypassAutoEncryption + return op.Crypt != nil && !op.Crypt.BypassAutoEncryption() } // selectServer handles performing server selection for an operation. @@ -298,40 +310,8 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { } } - srvr, conn, err := op.getServerAndConnection(ctx) - if err != nil { - return err - } - defer conn.Close() - - desc := description.SelectedServer{Server: conn.Description(), Kind: op.Deployment.Kind()} - scratch = scratch[:0] - - if desc.WireVersion == nil || desc.WireVersion.Max < 4 { - switch op.Legacy { - case LegacyFind: - return op.legacyFind(ctx, scratch, srvr, conn, desc) - case LegacyGetMore: - return op.legacyGetMore(ctx, scratch, srvr, conn, desc) - case LegacyKillCursors: - return op.legacyKillCursors(ctx, scratch, srvr, conn, desc) - } - } - if desc.WireVersion == nil || desc.WireVersion.Max < 3 { - switch op.Legacy { - case LegacyListCollections: - return op.legacyListCollections(ctx, scratch, srvr, conn, desc) - case LegacyListIndexes: - return op.legacyListIndexes(ctx, scratch, srvr, conn, desc) - } - } - - var res bsoncore.Document - var operationErr WriteCommandError - var original error var retries int - retryable := op.retryable(desc.Server) - if retryable && op.RetryMode != nil { + if op.RetryMode != nil { switch op.Type { case Write: if op.Client == nil { @@ -343,15 +323,6 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { case RetryContext: retries = -1 } - - op.Client.RetryWrite = false - if *op.RetryMode > RetryNone { - op.Client.RetryWrite = true - if !op.Client.Committing && !op.Client.Aborting { - op.Client.IncrementTxnNumber() - } - } - case Read: switch *op.RetryMode { case RetryOnce, RetryOncePerCommand: @@ -361,10 +332,107 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { } } } + + var srvr Server + var conn Connection + var res bsoncore.Document + var operationErr WriteCommandError + var prevErr error batching := op.Batches.Valid() retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled() + retrySupported := false + first := true currIndex := 0 + + // resetForRetry records the error that caused the retry, decrements retries, and resets the + // retry loop variables to request a new server and a new connection for the next attempt. + resetForRetry := func(err error) { + retries-- + prevErr = err + // If we got a connection, close it immediately to release pool resources for + // subsequent retries. + if conn != nil { + conn.Close() + } + // Set the server and connection to nil to request a new server and connection. + srvr = nil + conn = nil + } + for { + // If the server or connection are nil, try to select a new server and get a new connection. + if srvr == nil || conn == nil { + srvr, conn, err = op.getServerAndConnection(ctx) + if err != nil { + // If the returned error is retryable and there are retries remaining (negative + // retries means retry indefinitely), then retry the operation. Set the server + // and connection to nil to request a new server and connection. 
+ if rerr, ok := err.(RetryablePoolError); ok && rerr.Retryable() && retries != 0 { + resetForRetry(err) + continue + } + + // If this is a retry and there's an error from a previous attempt, return the previous + // error instead of the current connection error. + if prevErr != nil { + return prevErr + } + return err + } + defer conn.Close() + } + + // Run steps that must only be run on the first attempt, but not again for retries. + if first { + // Determine if retries are supported for the current operation on the current server + // description. Per the retryable writes specification, only determine this for the + // first server selected: + // + // If the server selected for the first attempt of a retryable write operation does + // not support retryable writes, drivers MUST execute the write as if retryable writes + // were not enabled. + retrySupported = op.retryable(conn.Description()) + + // If retries are supported for the current operation on the current server description, + // client retries are enabled, the operation type is write, and we haven't incremented + // the txn number yet, enable retry writes on the session and increment the txn number. + // Calling IncrementTxnNumber() for server descriptions or topologies that do not + // support retries (e.g. standalone topologies) will cause server errors. Only do this + // check for the first attempt to keep retried writes in the same transaction. + if retrySupported && op.RetryMode != nil && op.Type == Write && op.Client != nil { + op.Client.RetryWrite = false + if op.RetryMode.Enabled() { + op.Client.RetryWrite = true + if !op.Client.Committing && !op.Client.Aborting { + op.Client.IncrementTxnNumber() + } + } + } + + first = false + } + + desc := description.SelectedServer{Server: conn.Description(), Kind: op.Deployment.Kind()} + scratch = scratch[:0] + if desc.WireVersion == nil || desc.WireVersion.Max < 4 { + switch op.Legacy { + case LegacyFind: + return op.legacyFind(ctx, scratch, srvr, conn, desc) + case LegacyGetMore: + return op.legacyGetMore(ctx, scratch, srvr, conn, desc) + case LegacyKillCursors: + return op.legacyKillCursors(ctx, scratch, srvr, conn, desc) + } + } + if desc.WireVersion == nil || desc.WireVersion.Max < 3 { + switch op.Legacy { + case LegacyListCollections: + return op.legacyListCollections(ctx, scratch, srvr, conn, desc) + case LegacyListIndexes: + return op.legacyListIndexes(ctx, scratch, srvr, conn, desc) + } + } + if batching { targetBatchSize := desc.MaxDocumentSize maxDocSize := desc.MaxDocumentSize @@ -398,6 +466,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { op.cmdName = startedInfo.cmdName startedInfo.redacted = op.redactCommand(startedInfo.cmdName, startedInfo.cmd) startedInfo.serviceID = conn.Description().ServiceID + startedInfo.serverConnID = conn.ServerConnectionID() op.publishStartedEvent(ctx, startedInfo) // get the moreToCome flag information before we compress @@ -412,23 +481,32 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { } finishedInfo := finishedInformation{ - cmdName: startedInfo.cmdName, - requestID: startedInfo.requestID, - startTime: time.Now(), - connID: startedInfo.connID, - redacted: startedInfo.redacted, - serviceID: startedInfo.serviceID, + cmdName: startedInfo.cmdName, + requestID: startedInfo.requestID, + startTime: time.Now(), + connID: startedInfo.connID, + serverConnID: startedInfo.serverConnID, + redacted: startedInfo.redacted, + serviceID: startedInfo.serviceID, } - // roundtrip using either the full 
roundTripper or a special one for when the moreToCome - // flag is set - var roundTrip = op.roundTrip - if moreToCome { - roundTrip = op.moreToComeRoundTrip - } - res, err = roundTrip(ctx, conn, wm) - if ep, ok := srvr.(ErrorProcessor); ok { - _ = ep.ProcessError(err, conn) + // Check if there's enough time to perform a best-case network round trip before the Context + // deadline. If not, create a network error that wraps a context.DeadlineExceeded error and + // skip the actual round trip. + if deadline, ok := ctx.Deadline(); ok && time.Now().Add(srvr.MinRTT()).After(deadline) { + err = op.networkError(context.DeadlineExceeded) + } else { + // roundtrip using either the full roundTripper or a special one for when the moreToCome + // flag is set + var roundTrip = op.roundTrip + if moreToCome { + roundTrip = op.moreToComeRoundTrip + } + res, err = roundTrip(ctx, conn, wm) + + if ep, ok := srvr.(ErrorProcessor); ok { + _ = ep.ProcessError(err, conn) + } } finishedInfo.response = res @@ -438,7 +516,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { var perr error switch tt := err.(type) { case WriteCommandError: - if e := err.(WriteCommandError); retryable && op.Type == Write && e.UnsupportedStorageEngine() { + if e := err.(WriteCommandError); retrySupported && op.Type == Write && e.UnsupportedStorageEngine() { return ErrUnsupportedStorageEngine } @@ -453,23 +531,16 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { tt.Labels = append(tt.Labels, RetryableWriteError) } - if retryable && retryableErr && retries != 0 { - retries-- - original, err = err, nil - conn.Close() // Avoid leaking the connection. - srvr, conn, err = op.getServerAndConnection(ctx) - if err != nil || conn == nil || !op.retryable(conn.Description()) { - if conn != nil { - conn.Close() - } - return original - } - defer conn.Close() // Avoid leaking the new connection. + // If retries are supported for the current operation on the first server description, + // the error is considered retryable, and there are retries remaining (negative retries + // means retry indefinitely), then retry the operation. + if retrySupported && retryableErr && retries != 0 { if op.Client != nil && op.Client.Committing { // Apply majority write concern for retries op.Client.UpdateCommitTransactionWriteConcern() op.WriteConcern = op.Client.CurrentWc } + resetForRetry(tt) continue } @@ -482,7 +553,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { ConnectionDescription: desc.Server, CurrentIndex: currIndex, } - perr = op.ProcessResponseFn(info) + _ = op.ProcessResponseFn(info) } if batching && len(tt.WriteErrors) > 0 && currIndex > 0 { @@ -493,7 +564,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { // If batching is enabled and either ordered is the default (which is true) or // explicitly set to true and we have write errors, return the errors. 
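Before each attempt, the Execute changes above compare the remaining Context deadline against the selected server's minimum observed round-trip time and synthesize a network error wrapping context.DeadlineExceeded rather than starting a round trip that cannot finish. The guard on its own, as a small sketch with hypothetical names:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // enoughTimeForRoundTrip reports whether a best-case round trip (minRTT)
    // could still complete before the context deadline expires.
    func enoughTimeForRoundTrip(ctx context.Context, minRTT time.Duration) bool {
        deadline, ok := ctx.Deadline()
        if !ok {
            return true // no deadline: always attempt the round trip
        }
        return !time.Now().Add(minRTT).After(deadline)
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
        defer cancel()
        fmt.Println(enoughTimeForRoundTrip(ctx, 50*time.Millisecond)) // false: skip and fail fast
        fmt.Println(enoughTimeForRoundTrip(ctx, time.Microsecond))    // true (barring scheduling delays)
    }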
- if batching && (op.Batches.Ordered == nil || *op.Batches.Ordered == true) && len(tt.WriteErrors) > 0 { + if batching && (op.Batches.Ordered == nil || *op.Batches.Ordered) && len(tt.WriteErrors) > 0 { return tt } if op.Client != nil && op.Client.Committing && tt.WriteConcernError != nil { @@ -503,6 +574,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { Code: int32(tt.WriteConcernError.Code), Message: tt.WriteConcernError.Message, Labels: tt.Labels, + Raw: tt.Raw, } // The UnknownTransactionCommitResult label is added to all writeConcernErrors besides unknownReplWriteConcernCode // and unsatisfiableWriteConcernCode @@ -517,13 +589,15 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { operationErr.WriteConcernError = tt.WriteConcernError operationErr.WriteErrors = append(operationErr.WriteErrors, tt.WriteErrors...) operationErr.Labels = tt.Labels + operationErr.Raw = tt.Raw case Error: if tt.HasErrorLabel(TransientTransactionError) || tt.HasErrorLabel(UnknownTransactionCommitResult) { if err := op.Client.ClearPinnedResources(); err != nil { return err } } - if e := err.(Error); retryable && op.Type == Write && e.UnsupportedStorageEngine() { + + if e := err.(Error); retrySupported && op.Type == Write && e.UnsupportedStorageEngine() { return ErrUnsupportedStorageEngine } @@ -544,23 +618,16 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { retryableErr = tt.RetryableRead() } - if retryable && retryableErr && retries != 0 { - retries-- - original, err = err, nil - conn.Close() // Avoid leaking the connection. - srvr, conn, err = op.getServerAndConnection(ctx) - if err != nil || conn == nil || !op.retryable(conn.Description()) { - if conn != nil { - conn.Close() - } - return original - } - defer conn.Close() // Avoid leaking the new connection. + // If retries are supported for the current operation on the first server description, + // the error is considered retryable, and there are retries remaining (negative retries + // means retry indefinitely), then retry the operation. + if retrySupported && retryableErr && retries != 0 { if op.Client != nil && op.Client.Committing { // Apply majority write concern for retries op.Client.UpdateCommitTransactionWriteConcern() op.WriteConcern = op.Client.CurrentWc } + resetForRetry(tt) continue } @@ -573,7 +640,7 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { ConnectionDescription: desc.Server, CurrentIndex: currIndex, } - perr = op.ProcessResponseFn(info) + _ = op.ProcessResponseFn(info) } if op.Client != nil && op.Client.Committing && (retryableErr || tt.Code == 50) { @@ -607,13 +674,16 @@ func (op Operation) Execute(ctx context.Context, scratch []byte) error { ConnectionDescription: desc.Server, CurrentIndex: currIndex, } - perr = op.ProcessResponseFn(info) + _ = op.ProcessResponseFn(info) } return err } + // If we're batching and there are batches remaining, advance to the next batch. This isn't + // a retry, so increment the transaction number, reset the retries number, and don't set + // server or connection to nil to continue using the same connection. 
if batching && len(op.Batches.Documents) > 0 { - if retryable && op.Client != nil && op.RetryMode != nil { + if retrySupported && op.Client != nil && op.RetryMode != nil { if *op.RetryMode > RetryNone { op.Client.IncrementTxnNumber() } @@ -664,17 +734,7 @@ func (op Operation) retryable(desc description.Server) bool { func (op Operation) roundTrip(ctx context.Context, conn Connection, wm []byte) ([]byte, error) { err := conn.WriteWireMessage(ctx, wm) if err != nil { - labels := []string{NetworkError} - if op.Client != nil { - op.Client.MarkDirty() - } - if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing { - labels = append(labels, TransientTransactionError) - } - if op.Client != nil && op.Client.Committing { - labels = append(labels, UnknownTransactionCommitResult) - } - return nil, Error{Message: err.Error(), Labels: labels, Wrapped: err} + return nil, op.networkError(err) } return op.readWireMessage(ctx, conn, wm) @@ -685,17 +745,7 @@ func (op Operation) readWireMessage(ctx context.Context, conn Connection, wm []b wm, err = conn.ReadWireMessage(ctx, wm[:0]) if err != nil { - labels := []string{NetworkError} - if op.Client != nil { - op.Client.MarkDirty() - } - if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing { - labels = append(labels, TransientTransactionError) - } - if op.Client != nil && op.Client.Committing { - labels = append(labels, UnknownTransactionCommitResult) - } - return nil, Error{Message: err.Error(), Labels: labels, Wrapped: err} + return nil, op.networkError(err) } // If we're using a streamable connection, we set its streaming state based on the moreToCome flag in the server @@ -734,6 +784,27 @@ func (op Operation) readWireMessage(ctx context.Context, conn Connection, wm []b return res, nil } +// networkError wraps the provided error in an Error with label "NetworkError" and, if a transaction +// is running or committing, the appropriate transaction state labels. The returned error indicates +// the operation should be retried for reads and writes. If err is nil, networkError returns nil. +func (op Operation) networkError(err error) error { + if err == nil { + return nil + } + + labels := []string{NetworkError} + if op.Client != nil { + op.Client.MarkDirty() + } + if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing { + labels = append(labels, TransientTransactionError) + } + if op.Client != nil && op.Client.Committing { + labels = append(labels, UnknownTransactionCommitResult) + } + return Error{Message: err.Error(), Labels: labels, Wrapped: err} +} + // moreToComeRoundTrip writes a wiremessage to the provided connection. This is used when an OP_MSG is // being sent with the moreToCome bit set. func (op *Operation) moreToComeRoundTrip(ctx context.Context, conn Connection, wm []byte) ([]byte, error) { @@ -795,8 +866,10 @@ func (Operation) decompressWireMessage(wm []byte) ([]byte, error) { func (op Operation) createWireMessage(ctx context.Context, dst []byte, desc description.SelectedServer, conn Connection) ([]byte, startedInformation, error) { - - if desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion { + // If topology is not LoadBalanced, API version is not declared, and wire version is unknown + // or less than 6, use OP_QUERY. Otherwise, use OP_MSG. 
+ if desc.Kind != description.LoadBalanced && op.ServerAPI == nil && + (desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion) { return op.createQueryWireMessage(dst, desc) } return op.createMsgWireMessage(ctx, dst, desc, conn) @@ -813,7 +886,7 @@ func (op Operation) addBatchArray(dst []byte) []byte { func (op Operation) createQueryWireMessage(dst []byte, desc description.SelectedServer) ([]byte, startedInformation, error) { var info startedInformation - flags := op.slaveOK(desc) + flags := op.secondaryOK(desc) var wmindex int32 info.requestID = wiremessage.NextRequestID() wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery) @@ -826,7 +899,7 @@ func (op Operation) createQueryWireMessage(dst []byte, desc description.Selected dst = wiremessage.AppendQueryNumberToReturn(dst, -1) wrapper := int32(-1) - rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, true) + rp, err := op.createReadPref(desc, true) if err != nil { return dst, info, err } @@ -924,7 +997,7 @@ func (op Operation) createMsgWireMessage(ctx context.Context, dst []byte, desc d dst = op.addServerAPI(dst) dst = bsoncore.AppendStringElement(dst, "$db", op.Database) - rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, false) + rp, err := op.createReadPref(desc, false) if err != nil { return dst, info, err } @@ -1185,9 +1258,17 @@ func (op Operation) getReadPrefBasedOnTransaction() (*readpref.ReadPref, error) return op.ReadPreference, nil } -func (op Operation) createReadPref(serverKind description.ServerKind, topologyKind description.TopologyKind, isOpQuery bool) (bsoncore.Document, error) { - if serverKind == description.Standalone || (isOpQuery && serverKind != description.Mongos) || op.Type == Write { - // Don't send read preference for non-mongos when using OP_QUERY or for all standalones +func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bool) (bsoncore.Document, error) { + // TODO(GODRIVER-2231): Instead of checking if isOutputAggregate and desc.Server.WireVersion.Max < 13, + // somehow check if supplied readPreference was "overwritten" with primary in description.selectForReplicaSet. + if desc.Server.Kind == description.Standalone || (isOpQuery && desc.Server.Kind != description.Mongos) || + op.Type == Write || (op.IsOutputAggregate && desc.Server.WireVersion.Max < 13) { + // Don't send read preference for: + // 1. all standalones + // 2. non-mongos when using OP_QUERY + // 3. all writes + // 4. 
when operation is an aggregate with an output stage, and selected server's wire + // version is < 13 return nil, nil } @@ -1198,7 +1279,7 @@ func (op Operation) createReadPref(serverKind description.ServerKind, topologyKi } if rp == nil { - if topologyKind == description.Single && serverKind != description.Mongos { + if desc.Kind == description.Single && desc.Server.Kind != description.Mongos { doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred") doc, _ = bsoncore.AppendDocumentEnd(doc, idx) return doc, nil @@ -1208,10 +1289,10 @@ func (op Operation) createReadPref(serverKind description.ServerKind, topologyKi switch rp.Mode() { case readpref.PrimaryMode: - if serverKind == description.Mongos { + if desc.Server.Kind == description.Mongos { return nil, nil } - if topologyKind == description.Single { + if desc.Kind == description.Single { doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred") doc, _ = bsoncore.AppendDocumentEnd(doc, idx) return doc, nil @@ -1221,7 +1302,7 @@ func (op Operation) createReadPref(serverKind description.ServerKind, topologyKi doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred") case readpref.SecondaryPreferredMode: _, ok := rp.MaxStaleness() - if serverKind == description.Mongos && isOpQuery && !ok && len(rp.TagSets()) == 0 && rp.HedgeEnabled() == nil { + if desc.Server.Kind == description.Mongos && isOpQuery && !ok && len(rp.TagSets()) == 0 && rp.HedgeEnabled() == nil { return nil, nil } doc = bsoncore.AppendStringElement(doc, "mode", "secondaryPreferred") @@ -1233,9 +1314,6 @@ func (op Operation) createReadPref(serverKind description.ServerKind, topologyKi sets := make([]bsoncore.Document, 0, len(rp.TagSets())) for _, ts := range rp.TagSets() { - if len(ts) == 0 { - continue - } i, set := bsoncore.AppendDocumentStart(nil) for _, t := range ts { set = bsoncore.AppendStringElement(set, t.Name, t.Value) @@ -1270,20 +1348,20 @@ func (op Operation) createReadPref(serverKind description.ServerKind, topologyKi return doc, nil } -func (op Operation) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag { +func (op Operation) secondaryOK(desc description.SelectedServer) wiremessage.QueryFlag { if desc.Kind == description.Single && desc.Server.Kind != description.Mongos { - return wiremessage.SlaveOK + return wiremessage.SecondaryOK } if rp := op.ReadPreference; rp != nil && rp.Mode() != readpref.PrimaryMode { - return wiremessage.SlaveOK + return wiremessage.SecondaryOK } return 0 } func (Operation) canCompress(cmd string) bool { - if cmd == "isMaster" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" || + if cmd == internal.LegacyHello || cmd == "hello" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" || cmd == "createUser" || cmd == "updateUser" || cmd == "copydbSaslStart" || cmd == "copydbgetnonce" || cmd == "copydb" { return false } @@ -1439,11 +1517,11 @@ func (op *Operation) redactCommand(cmd string, doc bsoncore.Document) bool { return true } - if strings.ToLower(cmd) != "ismaster" && cmd != "hello" { + if strings.ToLower(cmd) != internal.LegacyHelloLowercase && cmd != "hello" { return false } - // An isMaster without speculative authentication can be monitored. + // A hello without speculative authentication can be monitored. 
_, err := doc.LookupErr("speculativeAuthenticate") return err == nil } @@ -1470,12 +1548,13 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma } started := &event.CommandStartedEvent{ - Command: cmdCopy, - DatabaseName: op.Database, - CommandName: info.cmdName, - RequestID: int64(info.requestID), - ConnectionID: info.connID, - ServiceID: info.serviceID, + Command: cmdCopy, + DatabaseName: op.Database, + CommandName: info.cmdName, + RequestID: int64(info.requestID), + ConnectionID: info.connID, + ServerConnectionID: info.serverConnID, + ServiceID: info.serviceID, } op.CommandMonitor.Started(ctx, started) } @@ -1494,15 +1573,16 @@ func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInfor var durationNanos int64 var emptyTime time.Time if info.startTime != emptyTime { - durationNanos = time.Now().Sub(info.startTime).Nanoseconds() + durationNanos = time.Since(info.startTime).Nanoseconds() } finished := event.CommandFinishedEvent{ - CommandName: info.cmdName, - RequestID: int64(info.requestID), - ConnectionID: info.connID, - DurationNanos: durationNanos, - ServiceID: info.serviceID, + CommandName: info.cmdName, + RequestID: int64(info.requestID), + ConnectionID: info.connID, + DurationNanos: durationNanos, + ServerConnectionID: info.serverConnID, + ServiceID: info.serviceID, } if success { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go index b1957a3ed..608733b45 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -27,7 +25,7 @@ type AbortTransaction struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -46,7 +44,7 @@ func (at *AbortTransaction) processResponse(driver.ResponseInfo) error { return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (at *AbortTransaction) Execute(ctx context.Context) error { if at.deployment == nil { return errors.New("the AbortTransaction operation must have a Deployment set before Execute can be called") @@ -130,7 +128,7 @@ func (at *AbortTransaction) CommandMonitor(monitor *event.CommandMonitor) *Abort } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (at *AbortTransaction) Crypt(crypt *driver.Crypt) *AbortTransaction { +func (at *AbortTransaction) Crypt(crypt driver.Crypt) *AbortTransaction { if at == nil { at = new(AbortTransaction) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.toml deleted file mode 100644 index d4789dd3e..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.toml +++ /dev/null @@ -1,17 +0,0 @@ -version = 0 -name = "AbortTransaction" -documentation = "AbortTransaction performs an abortTransaction operation." - -[properties] -enabled = ["write concern"] -retryable = {mode = "once per command", type = "writes"} - -[command] -name = "abortTransaction" -parameter = "database" - -[request.recoveryToken] -type = "document" -documentation = """ -RecoveryToken sets the recovery token to use when committing or aborting a sharded transaction.\ -""" diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go index 311d28744..e3554e338 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -23,7 +21,7 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) -// Performs an aggregate operation +// Aggregate represents an aggregate operation. type Aggregate struct { allowDiskUse *bool batchSize *int32 @@ -44,9 +42,11 @@ type Aggregate struct { retry *driver.RetryMode selector description.ServerSelector writeConcern *writeconcern.WriteConcern - crypt *driver.Crypt + crypt driver.Crypt serverAPI *driver.ServerAPIOptions let bsoncore.Document + hasOutputStage bool + customOptions map[string]bsoncore.Value result driver.CursorResponse } @@ -68,6 +68,8 @@ func (a *Aggregate) Result(opts driver.CursorOptions) (*driver.BatchCursor, erro return driver.NewBatchCursor(a.result, clientSession, clock, opts) } +// ResultCursorResponse returns the underlying CursorResponse result of executing this +// operation. func (a *Aggregate) ResultCursorResponse() driver.CursorResponse { return a.result } @@ -80,7 +82,7 @@ func (a *Aggregate) processResponse(info driver.ResponseInfo) error { } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. 
func (a *Aggregate) Execute(ctx context.Context) error { if a.deployment == nil { return errors.New("the Aggregate operation must have a Deployment set before Execute can be called") @@ -104,6 +106,7 @@ func (a *Aggregate) Execute(ctx context.Context) error { Crypt: a.crypt, MinimumWriteConcernWireVersion: 5, ServerAPI: a.serverAPI, + IsOutputAggregate: a.hasOutputStage, }.Execute(ctx, nil) } @@ -153,6 +156,9 @@ func (a *Aggregate) command(dst []byte, desc description.SelectedServer) ([]byte if a.let != nil { dst = bsoncore.AppendDocumentElement(dst, "let", a.let) } + for optionName, optionValue := range a.customOptions { + dst = bsoncore.AppendValueElement(dst, optionName, optionValue) + } cursorDoc, _ = bsoncore.AppendDocumentEnd(cursorDoc, cursorIdx) dst = bsoncore.AppendDocumentElement(dst, "cursor", cursorDoc) @@ -352,7 +358,7 @@ func (a *Aggregate) Retry(retry driver.RetryMode) *Aggregate { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (a *Aggregate) Crypt(crypt *driver.Crypt) *Aggregate { +func (a *Aggregate) Crypt(crypt driver.Crypt) *Aggregate { if a == nil { a = new(Aggregate) } @@ -380,3 +386,24 @@ func (a *Aggregate) Let(let bsoncore.Document) *Aggregate { a.let = let return a } + +// HasOutputStage specifies whether the aggregate contains an output stage. Used in determining when to +// append read preference at the operation level. +func (a *Aggregate) HasOutputStage(hos bool) *Aggregate { + if a == nil { + a = new(Aggregate) + } + + a.hasOutputStage = hos + return a +} + +// CustomOptions specifies extra options to use in the aggregate command. +func (a *Aggregate) CustomOptions(co map[string]bsoncore.Value) *Aggregate { + if a == nil { + a = new(Aggregate) + } + + a.customOptions = co + return a +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.toml deleted file mode 100644 index fd397b9c8..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.toml +++ /dev/null @@ -1,48 +0,0 @@ -version = 0 -name = "Aggregate" -documentation = "Performs an aggregate operation" -response.type = "batch cursor" - -[properties] -enabled = ["read concern", "read preference", "write concern"] -retryable = {mode = "once per command", type = "reads"} -MinimumWriteConcernWireVersion = 5 - -[command] -name = "aggregate" -parameter = "collection" -database = true - -[request.pipeline] -type = "array" -constructor = true -documentation = "Pipeline determines how data is transformed for an aggregation." - -[request.allowDiskUse] -type = "boolean" -documentation = "AllowDiskUse enables writing to temporary files. When true, aggregation stages can write to the dbPath/_tmp directory." - -[request.batchSize] -type = "int32" -documentation = "BatchSize specifies the number of documents to return in every batch." - -[request.bypassDocumentValidation] -type = "boolean" -documentation = "BypassDocumentValidation allows the write to opt-out of document level validation. This only applies when the $out stage is specified." - -[request.collation] -type = "document" -minWireVersionRequired = 5 -documentation = "Collation specifies a collation. This option is only valid for server versions 3.4 and above." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." 
- -[request.comment] -type = "string" -documentation = "Comment specifies an arbitrary string to help trace the operation through the database profiler, currentOp, and logs." - -[request.hint] -type = "value" -documentation = "Hint specifies the index to use." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go index f2a1ca460..07b21c567 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go @@ -1,4 +1,8 @@ -// NOTE: This file is maintained by hand because operationgen cannot generate it. +// Copyright (C) MongoDB, Inc. 2021-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 package operation @@ -28,9 +32,7 @@ type Command struct { monitor *event.CommandMonitor resultResponse bsoncore.Document resultCursor *driver.BatchCursor - srvr driver.Server - desc description.Server - crypt *driver.Crypt + crypt driver.Crypt serverAPI *driver.ServerAPIOptions createCursor bool cursorOpts driver.CursorOptions @@ -67,7 +69,7 @@ func (c *Command) ResultCursor() (*driver.BatchCursor, error) { return c.resultCursor, nil } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (c *Command) Execute(ctx context.Context) error { if c.deployment == nil { return errors.New("the Command operation must have a Deployment set before Execute can be called") @@ -185,7 +187,7 @@ func (c *Command) ServerSelector(selector description.ServerSelector) *Command { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (c *Command) Crypt(crypt *driver.Crypt) *Command { +func (c *Command) Crypt(crypt driver.Crypt) *Command { if c == nil { c = new(Command) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go index c56052506..14ed7bcd8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -27,7 +25,7 @@ type CommitTransaction struct { session *session.Client clock *session.ClusterClock monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -46,7 +44,7 @@ func (ct *CommitTransaction) processResponse(driver.ResponseInfo) error { return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. 
func (ct *CommitTransaction) Execute(ctx context.Context) error { if ct.deployment == nil { return errors.New("the CommitTransaction operation must have a Deployment set before Execute can be called") @@ -133,7 +131,7 @@ func (ct *CommitTransaction) CommandMonitor(monitor *event.CommandMonitor) *Comm } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (ct *CommitTransaction) Crypt(crypt *driver.Crypt) *CommitTransaction { +func (ct *CommitTransaction) Crypt(crypt driver.Crypt) *CommitTransaction { if ct == nil { ct = new(CommitTransaction) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.toml deleted file mode 100644 index c49dbd80d..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.toml +++ /dev/null @@ -1,22 +0,0 @@ -version = 0 -name = "CommitTransaction" -documentation = "CommitTransaction attempts to commit a transaction." - -[properties] -enabled = ["write concern"] -disabled = ["collection"] -retryable = {mode = "once per command", type = "writes"} - -[command] -name = "commitTransaction" -parameter = "database" - -[request.recoveryToken] -type = "document" -documentation = """ -RecoveryToken sets the recovery token to use when committing or aborting a sharded transaction.\ -""" - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go index f38d6aa3d..8404f3642 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -22,7 +20,7 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) -// Performs a count operation +// Count represents a count operation. type Count struct { maxTimeMS *int64 query bsoncore.Document @@ -30,7 +28,7 @@ type Count struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment readConcern *readconcern.ReadConcern @@ -41,52 +39,50 @@ type Count struct { serverAPI *driver.ServerAPIOptions } +// CountResult represents a count result returned by the server. 
type CountResult struct { // The number of documents found N int64 } -func buildCountResult(response bsoncore.Document, srvr driver.Server) (CountResult, error) { +func buildCountResult(response bsoncore.Document) (CountResult, error) { elements, err := response.Elements() if err != nil { return CountResult{}, err } cr := CountResult{} -elementLoop: for _, element := range elements { switch element.Key() { case "n": // for count using original command var ok bool cr.N, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'n' is type int64, but received BSON type %s", + return cr, fmt.Errorf("response field 'n' is type int64, but received BSON type %s", element.Value().Type) - break elementLoop } case "cursor": // for count using aggregate with $collStats firstBatch, err := element.Value().Document().LookupErr("firstBatch") if err != nil { - break elementLoop + return cr, err } // get count value from first batch val := firstBatch.Array().Index(0) count, err := val.Document().LookupErr("n") if err != nil { - break elementLoop + return cr, err } // use count as Int64 for result var ok bool cr.N, ok = count.AsInt64OK() if !ok { - err = fmt.Errorf("response field 'n' is type int64, but received BSON type %s", + return cr, fmt.Errorf("response field 'n' is type int64, but received BSON type %s", element.Value().Type) - break elementLoop } } } - return cr, err + return cr, nil } // NewCount constructs and returns a new Count. @@ -99,11 +95,11 @@ func (c *Count) Result() CountResult { return c.result } func (c *Count) processResponse(info driver.ResponseInfo) error { var err error - c.result, err = buildCountResult(info.ServerResponse, info.Server) + c.result, err = buildCountResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (c *Count) Execute(ctx context.Context) error { if c.deployment == nil { return errors.New("the Count operation must have a Deployment set before Execute can be called") @@ -238,7 +234,7 @@ func (c *Count) CommandMonitor(monitor *event.CommandMonitor) *Count { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (c *Count) Crypt(crypt *driver.Crypt) *Count { +func (c *Count) Crypt(crypt driver.Crypt) *Count { if c == nil { c = new(Count) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.toml deleted file mode 100644 index f5d5e0326..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.toml +++ /dev/null @@ -1,26 +0,0 @@ -version = 0 -name = "Count" -documentation = "Performs a count operation" - -[properties] -enabled = ["read concern", "read preference"] -retryable = {mode = "once per command", type = "reads"} - -[command] -name = "count" -parameter = "collection" - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." - -[request.query] -type = "document" -documentation = "Query determines what results are returned from find." 
- -[response] -name = "CountResult" - -[response.field.n] -type = "int64" -documentation = "The number of documents found" diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go index a06fac7e1..48fee42b9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -20,7 +18,7 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) -// Create a create operation +// Create represents a create operation. type Create struct { capped *bool collation bsoncore.Document @@ -37,7 +35,7 @@ type Create struct { session *session.Client clock *session.ClusterClock monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -55,11 +53,10 @@ func NewCreate(collectionName string) *Create { } func (c *Create) processResponse(driver.ResponseInfo) error { - var err error - return err + return nil } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (c *Create) Execute(ctx context.Context) error { if c.deployment == nil { return errors.New("the Create operation must have a Deployment set before Execute can be called") @@ -130,7 +127,7 @@ func (c *Create) command(dst []byte, desc description.SelectedServer) ([]byte, e return dst, nil } -// Specifies if the collection is capped. +// Capped specifies if the collection is capped. func (c *Create) Capped(capped bool) *Create { if c == nil { c = new(Create) @@ -150,7 +147,7 @@ func (c *Create) Collation(collation bsoncore.Document) *Create { return c } -// Specifies the name of the collection to create. +// CollectionName specifies the name of the collection to create. func (c *Create) CollectionName(collectionName string) *Create { if c == nil { c = new(Create) @@ -160,7 +157,7 @@ func (c *Create) CollectionName(collectionName string) *Create { return c } -// Specifies a default configuration for indexes on the collection. +// IndexOptionDefaults specifies a default configuration for indexes on the collection. func (c *Create) IndexOptionDefaults(indexOptionDefaults bsoncore.Document) *Create { if c == nil { c = new(Create) @@ -170,7 +167,7 @@ func (c *Create) IndexOptionDefaults(indexOptionDefaults bsoncore.Document) *Cre return c } -// Specifies the maximum number of documents allowed in a capped collection. +// Max specifies the maximum number of documents allowed in a capped collection. func (c *Create) Max(max int64) *Create { if c == nil { c = new(Create) @@ -180,7 +177,7 @@ func (c *Create) Max(max int64) *Create { return c } -// Specifies the agggregtion pipeline to be run against the source to create the view. +// Pipeline specifies the agggregtion pipeline to be run against the source to create the view. func (c *Create) Pipeline(pipeline bsoncore.Document) *Create { if c == nil { c = new(Create) @@ -190,7 +187,7 @@ func (c *Create) Pipeline(pipeline bsoncore.Document) *Create { return c } -// Specifies the maximum size in bytes for a capped collection. 
+// Size specifies the maximum size in bytes for a capped collection. func (c *Create) Size(size int64) *Create { if c == nil { c = new(Create) @@ -200,7 +197,7 @@ func (c *Create) Size(size int64) *Create { return c } -// Specifies the storage engine to use for the index. +// StorageEngine specifies the storage engine to use for the index. func (c *Create) StorageEngine(storageEngine bsoncore.Document) *Create { if c == nil { c = new(Create) @@ -210,7 +207,7 @@ func (c *Create) StorageEngine(storageEngine bsoncore.Document) *Create { return c } -// Specifies what should happen if a document being inserted does not pass validation. +// ValidationAction specifies what should happen if a document being inserted does not pass validation. func (c *Create) ValidationAction(validationAction string) *Create { if c == nil { c = new(Create) @@ -220,7 +217,8 @@ func (c *Create) ValidationAction(validationAction string) *Create { return c } -// Specifies how strictly the server applies validation rules to existing documents in the collection during update operations. +// ValidationLevel specifies how strictly the server applies validation rules to existing documents in the collection +// during update operations. func (c *Create) ValidationLevel(validationLevel string) *Create { if c == nil { c = new(Create) @@ -230,7 +228,7 @@ func (c *Create) ValidationLevel(validationLevel string) *Create { return c } -// Specifies validation rules for the collection. +// Validator specifies validation rules for the collection. func (c *Create) Validator(validator bsoncore.Document) *Create { if c == nil { c = new(Create) @@ -240,7 +238,7 @@ func (c *Create) Validator(validator bsoncore.Document) *Create { return c } -// Specifies the name of the source collection or view on which the view will be created. +// ViewOn specifies the name of the source collection or view on which the view will be created. func (c *Create) ViewOn(viewOn string) *Create { if c == nil { c = new(Create) @@ -281,7 +279,7 @@ func (c *Create) CommandMonitor(monitor *event.CommandMonitor) *Create { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (c *Create) Crypt(crypt *driver.Crypt) *Create { +func (c *Create) Crypt(crypt driver.Crypt) *Create { if c == nil { c = new(Create) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.toml deleted file mode 100644 index c1e29cfc5..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.toml +++ /dev/null @@ -1,62 +0,0 @@ -version = 0 -name = "Create" -documentation = "Create a create operation" - -[properties] -enabled = ["write concern"] -disabled = ["collection"] - -[command] -name = "create" -parameter = "collectionName" - -[request.collectionName] -type = "string" -documentation = "Specifies the name of the collection to create." -skip = true -constructor = true - -[request.capped] -type = "boolean" -documentation = "Specifies if the collection is capped." - -[request.collation] -type = "document" -minWireVersionRequired = 5 -documentation = "Collation specifies a collation. This option is only valid for server versions 3.4 and above." - -[request.indexOptionDefaults] -type = "document" -documentation = "Specifies a default configuration for indexes on the collection." - -[request.max] -type = "int64" -documentation = "Specifies the maximum number of documents allowed in a capped collection." 
- -[request.pipeline] -type = "array" -documentation = "Specifies the agggregtion pipeline to be run against the source to create the view." - -[request.size] -type = "int64" -documentation = "Specifies the maximum size in bytes for a capped collection." - -[request.storageEngine] -type = "document" -documentation = "Specifies the storage engine to use for the index." - -[request.validator] -type = "document" -documentation = "Specifies validation rules for the collection." - -[request.validationAction] -type = "string" -documentation = "Specifies what should happen if a document being inserted does not pass validation." - -[request.validationLevel] -type = "string" -documentation = "Specifies how strictly the server applies validation rules to existing documents in the collection during update operations." - -[request.viewOn] -type = "string" -documentation = "Specifies the name of the source collection or view on which the view will be created." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go index 63d3fc9b7..6bf98deea 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -31,7 +29,7 @@ type CreateIndexes struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -40,6 +38,7 @@ type CreateIndexes struct { serverAPI *driver.ServerAPIOptions } +// CreateIndexesResult represents a createIndexes result returned by the server. type CreateIndexesResult struct { // If the collection was created automatically. 
CreatedCollectionAutomatically bool @@ -49,7 +48,7 @@ type CreateIndexesResult struct { IndexesBefore int32 } -func buildCreateIndexesResult(response bsoncore.Document, srvr driver.Server) (CreateIndexesResult, error) { +func buildCreateIndexesResult(response bsoncore.Document) (CreateIndexesResult, error) { elements, err := response.Elements() if err != nil { return CreateIndexesResult{}, err @@ -61,19 +60,19 @@ func buildCreateIndexesResult(response bsoncore.Document, srvr driver.Server) (C var ok bool cir.CreatedCollectionAutomatically, ok = element.Value().BooleanOK() if !ok { - err = fmt.Errorf("response field 'createdCollectionAutomatically' is type bool, but received BSON type %s", element.Value().Type) + return cir, fmt.Errorf("response field 'createdCollectionAutomatically' is type bool, but received BSON type %s", element.Value().Type) } case "indexesAfter": var ok bool cir.IndexesAfter, ok = element.Value().AsInt32OK() if !ok { - err = fmt.Errorf("response field 'indexesAfter' is type int32, but received BSON type %s", element.Value().Type) + return cir, fmt.Errorf("response field 'indexesAfter' is type int32, but received BSON type %s", element.Value().Type) } case "indexesBefore": var ok bool cir.IndexesBefore, ok = element.Value().AsInt32OK() if !ok { - err = fmt.Errorf("response field 'indexesBefore' is type int32, but received BSON type %s", element.Value().Type) + return cir, fmt.Errorf("response field 'indexesBefore' is type int32, but received BSON type %s", element.Value().Type) } } } @@ -92,11 +91,11 @@ func (ci *CreateIndexes) Result() CreateIndexesResult { return ci.result } func (ci *CreateIndexes) processResponse(info driver.ResponseInfo) error { var err error - ci.result, err = buildCreateIndexesResult(info.ServerResponse, info.Server) + ci.result, err = buildCreateIndexesResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (ci *CreateIndexes) Execute(ctx context.Context) error { if ci.deployment == nil { return errors.New("the CreateIndexes operation must have a Deployment set before Execute can be called") @@ -135,9 +134,9 @@ func (ci *CreateIndexes) command(dst []byte, desc description.SelectedServer) ([ return dst, nil } -// The number of data-bearing members of a replica set, including the primary, that must complete the index builds -// successfully before the primary marks the indexes as ready. This should either be a string or int32 value. -// +// CommitQuorum specifies the number of data-bearing members of a replica set, including the primary, that must +// complete the index builds successfully before the primary marks the indexes as ready. This should either be a +// string or int32 value. func (ci *CreateIndexes) CommitQuorum(commitQuorum bsoncore.Value) *CreateIndexes { if ci == nil { ci = new(CreateIndexes) @@ -147,7 +146,7 @@ func (ci *CreateIndexes) CommitQuorum(commitQuorum bsoncore.Value) *CreateIndexe return ci } -// An array containing index specification documents for the indexes being created. +// Indexes specifies an array containing index specification documents for the indexes being created. 
func (ci *CreateIndexes) Indexes(indexes bsoncore.Document) *CreateIndexes { if ci == nil { ci = new(CreateIndexes) @@ -208,7 +207,7 @@ func (ci *CreateIndexes) CommandMonitor(monitor *event.CommandMonitor) *CreateIn } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (ci *CreateIndexes) Crypt(crypt *driver.Crypt) *CreateIndexes { +func (ci *CreateIndexes) Crypt(crypt driver.Crypt) *CreateIndexes { if ci == nil { ci = new(CreateIndexes) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.toml deleted file mode 100644 index 43373030c..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.toml +++ /dev/null @@ -1,42 +0,0 @@ -version = 0 -name = "CreateIndexes" -documentation = "CreateIndexes performs a createIndexes operation." - -[properties] -enabled = ["write concern"] - -[command] -name = "createIndexes" -parameter = "collection" - -[request.indexes] -type = "array" -constructor = true -documentation = "An array containing index specification documents for the indexes being created." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." - -[request.commitQuorum] -type = "value" -minWireVersionRequired = 9 -documentation = """ -The number of data-bearing members of a replica set, including the primary, that must complete the index builds -successfully before the primary marks the indexes as ready. This should either be a string or int32 value. -""" - -[response] -name = "CreateIndexesResult" - -[response.field.createdCollectionAutomatically] -type = "boolean" -documentation = "If the collection was created automatically." - -[response.field.indexesBefore] -type = "int32" -documentation = "The number of indexes existing before this command." - -[response.field.indexesAfter] -type = "int32" -documentation = "The number of indexes existing after this command." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go index c221f8a3c..beb893c71 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -29,7 +27,7 @@ type Delete struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -38,14 +36,16 @@ type Delete struct { hint *bool result DeleteResult serverAPI *driver.ServerAPIOptions + let bsoncore.Document } +// DeleteResult represents a delete result returned by the server. type DeleteResult struct { // Number of documents successfully deleted. 
- N int32 + N int64 } -func buildDeleteResult(response bsoncore.Document, srvr driver.Server) (DeleteResult, error) { +func buildDeleteResult(response bsoncore.Document) (DeleteResult, error) { elements, err := response.Elements() if err != nil { return DeleteResult{}, err @@ -55,9 +55,9 @@ func buildDeleteResult(response bsoncore.Document, srvr driver.Server) (DeleteRe switch element.Key() { case "n": var ok bool - dr.N, ok = element.Value().AsInt32OK() + dr.N, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type) + return dr, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type) } } } @@ -75,12 +75,12 @@ func NewDelete(deletes ...bsoncore.Document) *Delete { func (d *Delete) Result() DeleteResult { return d.result } func (d *Delete) processResponse(info driver.ResponseInfo) error { - dr, err := buildDeleteResult(info.ServerResponse, info.Server) + dr, err := buildDeleteResult(info.ServerResponse) d.result.N += dr.N return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (d *Delete) Execute(ctx context.Context) error { if d.deployment == nil { return errors.New("the Delete operation must have a Deployment set before Execute can be called") @@ -123,6 +123,9 @@ func (d *Delete) command(dst []byte, desc description.SelectedServer) ([]byte, e return nil, errUnacknowledgedHint } } + if d.let != nil { + dst = bsoncore.AppendDocumentElement(dst, "let", d.let) + } return dst, nil } @@ -190,7 +193,7 @@ func (d *Delete) CommandMonitor(monitor *event.CommandMonitor) *Delete { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (d *Delete) Crypt(crypt *driver.Crypt) *Delete { +func (d *Delete) Crypt(crypt driver.Crypt) *Delete { if d == nil { d = new(Delete) } @@ -271,3 +274,13 @@ func (d *Delete) ServerAPI(serverAPI *driver.ServerAPIOptions) *Delete { d.serverAPI = serverAPI return d } + +// Let specifies the let document to use. This option is only valid for server versions 5.0 and above. +func (d *Delete) Let(let bsoncore.Document) *Delete { + if d == nil { + d = new(Delete) + } + + d.let = let + return d +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.toml deleted file mode 100644 index dea0a7120..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.toml +++ /dev/null @@ -1,47 +0,0 @@ -version = 0 -name = "Delete" -documentation = "Delete performs a delete operation" - -[properties] -enabled = ["write concern"] -retryable = {mode = "once per command", type = "writes"} -batches = "deletes" - -[command] -name = "delete" -parameter = "collection" - -[request.deletes] -type = "document" -slice = true -constructor = true -variadic = true -required = true -documentation = """ -Deletes adds documents to this operation that will be used to determine what documents to delete when this operation -is executed. These documents should have the form {q: , limit: , collation: }. The -collation field is optional. If limit is 0, there will be no limit on the number of documents deleted.\ -""" - -[request.ordered] -type = "boolean" -documentation = """ -Ordered sets ordered. 
If true, when a write fails, the operation will return the error, when -false write failures do not stop execution of the operation.\ -""" - -[request.hint] -type = "boolean" -minWireVersionRequired = 5 -documentation = """ -Hint is a flag to indicate that the update document contains a hint. Hint is only supported by -servers >= 4.4. Older servers >= 3.4 will report an error for using the hint option. For servers < -3.4, the driver will return an error if the hint option is used.\ -""" - -[response] -name = "DeleteResult" - -[response.field.n] -type = "int32" -documentation = "Number of documents successfully deleted." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go index a233b9afc..4591c1ada 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -31,7 +29,7 @@ type Distinct struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment readConcern *readconcern.ReadConcern @@ -42,12 +40,13 @@ type Distinct struct { serverAPI *driver.ServerAPIOptions } +// DistinctResult represents a distinct result returned by the server. type DistinctResult struct { // The distinct values for the field. Values bsoncore.Value } -func buildDistinctResult(response bsoncore.Document, srvr driver.Server) (DistinctResult, error) { +func buildDistinctResult(response bsoncore.Document) (DistinctResult, error) { elements, err := response.Elements() if err != nil { return DistinctResult{}, err @@ -75,11 +74,11 @@ func (d *Distinct) Result() DistinctResult { return d.result } func (d *Distinct) processResponse(info driver.ResponseInfo) error { var err error - d.result, err = buildDistinctResult(info.ServerResponse, info.Server) + d.result, err = buildDistinctResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (d *Distinct) Execute(ctx context.Context) error { if d.deployment == nil { return errors.New("the Distinct operation must have a Deployment set before Execute can be called") @@ -205,7 +204,7 @@ func (d *Distinct) CommandMonitor(monitor *event.CommandMonitor) *Distinct { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (d *Distinct) Crypt(crypt *driver.Crypt) *Distinct { +func (d *Distinct) Crypt(crypt driver.Crypt) *Distinct { if d == nil { d = new(Distinct) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.toml deleted file mode 100644 index 81849d564..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.toml +++ /dev/null @@ -1,37 +0,0 @@ -version = 0 -name = "Distinct" -documentation = "Distinct performs a distinct operation." 
- -[properties] -enabled = ["read concern", "read preference"] -retryable = {mode = "once per command", type = "reads"} - -[command] -name = "distinct" -parameter = "collection" - -[request.key] -type = "string" -constructor = true -documentation = "Key specifies which field to return distinct values for." - -[request.query] -type = "document" -constructor = true -documentation = "Query specifies which documents to return distinct values from." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." - -[request.collation] -type = "document" -minWireVersionRequired = 5 -documentation = "Collation specifies a collation to be used." - -[response] -name = "DistinctResult" - -[response.field.values] -type = "value" -documentation = "The distinct values for the field." \ No newline at end of file diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go index 769e486eb..fab279dc9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -27,7 +25,7 @@ type DropCollection struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -36,6 +34,7 @@ type DropCollection struct { serverAPI *driver.ServerAPIOptions } +// DropCollectionResult represents a dropCollection result returned by the server. type DropCollectionResult struct { // The number of indexes in the dropped collection. NIndexesWas int32 @@ -43,7 +42,7 @@ type DropCollectionResult struct { Ns string } -func buildDropCollectionResult(response bsoncore.Document, srvr driver.Server) (DropCollectionResult, error) { +func buildDropCollectionResult(response bsoncore.Document) (DropCollectionResult, error) { elements, err := response.Elements() if err != nil { return DropCollectionResult{}, err @@ -55,13 +54,13 @@ func buildDropCollectionResult(response bsoncore.Document, srvr driver.Server) ( var ok bool dcr.NIndexesWas, ok = element.Value().AsInt32OK() if !ok { - err = fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type) + return dcr, fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type) } case "ns": var ok bool dcr.Ns, ok = element.Value().StringValueOK() if !ok { - err = fmt.Errorf("response field 'ns' is type string, but received BSON type %s", element.Value().Type) + return dcr, fmt.Errorf("response field 'ns' is type string, but received BSON type %s", element.Value().Type) } } } @@ -78,11 +77,11 @@ func (dc *DropCollection) Result() DropCollectionResult { return dc.result } func (dc *DropCollection) processResponse(info driver.ResponseInfo) error { var err error - dc.result, err = buildDropCollectionResult(info.ServerResponse, info.Server) + dc.result, err = buildDropCollectionResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. 
+// Execute runs this operations and returns an error if the operation did not execute successfully. func (dc *DropCollection) Execute(ctx context.Context) error { if dc.deployment == nil { return errors.New("the DropCollection operation must have a Deployment set before Execute can be called") @@ -150,7 +149,7 @@ func (dc *DropCollection) CommandMonitor(monitor *event.CommandMonitor) *DropCol } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (dc *DropCollection) Crypt(crypt *driver.Crypt) *DropCollection { +func (dc *DropCollection) Crypt(crypt driver.Crypt) *DropCollection { if dc == nil { dc = new(DropCollection) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.toml deleted file mode 100644 index b98db33ec..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.toml +++ /dev/null @@ -1,21 +0,0 @@ -version = 0 -name = "DropCollection" -documentation = "DropCollection performs a drop operation." - -[command] -name = "drop" -parameter = "collection" - -[properties] -enabled = ["write concern"] - -[response] -name = "DropCollectionResult" - -[response.field.ns] -type = "string" -documentation = "The namespace of the dropped collection." - -[response.field.nIndexesWas] -type = "int32" -documentation = "The number of indexes in the dropped collection." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go index 47bf4f871..be2220c6f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -25,7 +23,7 @@ type DropDatabase struct { session *session.Client clock *session.ClusterClock monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -38,7 +36,7 @@ func NewDropDatabase() *DropDatabase { return &DropDatabase{} } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (dd *DropDatabase) Execute(ctx context.Context) error { if dd.deployment == nil { return errors.New("the DropDatabase operation must have a Deployment set before Execute can be called") @@ -96,7 +94,7 @@ func (dd *DropDatabase) CommandMonitor(monitor *event.CommandMonitor) *DropDatab } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (dd *DropDatabase) Crypt(crypt *driver.Crypt) *DropDatabase { +func (dd *DropDatabase) Crypt(crypt driver.Crypt) *DropDatabase { if dd == nil { dd = new(DropDatabase) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.toml deleted file mode 100644 index 9c576ea0a..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.toml +++ /dev/null @@ -1,18 +0,0 @@ -version = 0 -name = "DropDatabase" -documentation = "DropDatabase performs a dropDatabase operation" - -[properties] -enabled = ["write concern"] -disabled = ["collection"] - -[command] -name = "dropDatabase" -parameter = "database" - -[response] -name = "DropDatabaseResult" - -[response.field.dropped] -type = "string" -documentation = "The dropped database." \ No newline at end of file diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go index fa53a0f04..127f39518 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -29,7 +27,7 @@ type DropIndexes struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -38,12 +36,13 @@ type DropIndexes struct { serverAPI *driver.ServerAPIOptions } +// DropIndexesResult represents a dropIndexes result returned by the server. type DropIndexesResult struct { // Number of indexes that existed before the drop was executed. NIndexesWas int32 } -func buildDropIndexesResult(response bsoncore.Document, srvr driver.Server) (DropIndexesResult, error) { +func buildDropIndexesResult(response bsoncore.Document) (DropIndexesResult, error) { elements, err := response.Elements() if err != nil { return DropIndexesResult{}, err @@ -55,7 +54,7 @@ func buildDropIndexesResult(response bsoncore.Document, srvr driver.Server) (Dro var ok bool dir.NIndexesWas, ok = element.Value().AsInt32OK() if !ok { - err = fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type) + return dir, fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type) } } } @@ -74,11 +73,11 @@ func (di *DropIndexes) Result() DropIndexesResult { return di.result } func (di *DropIndexes) processResponse(info driver.ResponseInfo) error { var err error - di.result, err = buildDropIndexesResult(info.ServerResponse, info.Server) + di.result, err = buildDropIndexesResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. 
func (di *DropIndexes) Execute(ctx context.Context) error { if di.deployment == nil { return errors.New("the DropIndexes operation must have a Deployment set before Execute can be called") @@ -173,7 +172,7 @@ func (di *DropIndexes) CommandMonitor(monitor *event.CommandMonitor) *DropIndexe } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (di *DropIndexes) Crypt(crypt *driver.Crypt) *DropIndexes { +func (di *DropIndexes) Crypt(crypt driver.Crypt) *DropIndexes { if di == nil { di = new(DropIndexes) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.toml deleted file mode 100644 index 8eafe7db0..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.toml +++ /dev/null @@ -1,28 +0,0 @@ -version = 0 -name = "DropIndexes" -documentation = "DropIndexes performs an dropIndexes operation." - -[properties] -enabled = ["write concern"] - -[command] -name = "dropIndexes" -parameter = "collection" - -[request.index] -type = "string" -constructor = true -documentation = """ -Index specifies the name of the index to drop. If '*' is specified, all indexes will be dropped. -""" - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." - -[response] -name = "DropIndexesResult" - -[response.field.nIndexesWas] -type = "int32" -documentation = "Number of indexes that existed before the drop was executed." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go index 943818019..8384fb2d7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -25,7 +23,7 @@ type EndSessions struct { session *session.Client clock *session.ClusterClock monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -44,7 +42,7 @@ func (es *EndSessions) processResponse(driver.ResponseInfo) error { return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (es *EndSessions) Execute(ctx context.Context) error { if es.deployment == nil { return errors.New("the EndSessions operation must have a Deployment set before Execute can be called") @@ -72,7 +70,7 @@ func (es *EndSessions) command(dst []byte, desc description.SelectedServer) ([]b return dst, nil } -// sessionIDs specify the sessions to be expired. +// SessionIDs specifies the sessions to be expired. func (es *EndSessions) SessionIDs(sessionIDs bsoncore.Document) *EndSessions { if es == nil { es = new(EndSessions) @@ -113,7 +111,7 @@ func (es *EndSessions) CommandMonitor(monitor *event.CommandMonitor) *EndSession } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (es *EndSessions) Crypt(crypt *driver.Crypt) *EndSessions { +func (es *EndSessions) Crypt(crypt driver.Crypt) *EndSessions { if es == nil { es = new(EndSessions) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.toml deleted file mode 100644 index 92910548b..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.toml +++ /dev/null @@ -1,16 +0,0 @@ -version = 0 -name = "EndSessions" -documentation = "EndSessions performs an endSessions operation." - -[properties] -disabled = ["collection"] - -[command] -name = "endSessions" -parameter = "sessionIDs" - -[request.sessionIDs] -type = "array" -documentation = "sessionIDs specify the sessions to be expired." -skip = true -constructor = true diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go index 8e3e0f1c5..3689a34b2 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -32,6 +30,7 @@ type Find struct { comment *string filter bsoncore.Document hint bsoncore.Value + let bsoncore.Document limit *int64 max bsoncore.Document maxTimeMS *int64 @@ -50,7 +49,7 @@ type Find struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment readConcern *readconcern.ReadConcern @@ -80,7 +79,7 @@ func (f *Find) processResponse(info driver.ResponseInfo) error { return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (f *Find) Execute(ctx context.Context) error { if f.deployment == nil { return errors.New("the Find operation must have a Deployment set before Execute can be called") @@ -138,6 +137,9 @@ func (f *Find) command(dst []byte, desc description.SelectedServer) ([]byte, err if f.hint.Type != bsontype.Type(0) { dst = bsoncore.AppendValueElement(dst, "hint", f.hint) } + if f.let != nil { + dst = bsoncore.AppendDocumentElement(dst, "let", f.let) + } if f.limit != nil { dst = bsoncore.AppendInt64Element(dst, "limit", *f.limit) } @@ -263,6 +265,16 @@ func (f *Find) Hint(hint bsoncore.Value) *Find { return f } +// Let specifies the let document to use. This option is only valid for server versions 5.0 and above. +func (f *Find) Let(let bsoncore.Document) *Find { + if f == nil { + f = new(Find) + } + + f.let = let + return f +} + // Limit sets a limit on the number of documents to return. func (f *Find) Limit(limit int64) *Find { if f == nil { @@ -323,7 +335,7 @@ func (f *Find) OplogReplay(oplogReplay bool) *Find { return f } -// Project limits the fields returned for all documents. +// Projection limits the fields returned for all documents. func (f *Find) Projection(projection bsoncore.Document) *Find { if f == nil { f = new(Find) @@ -444,7 +456,7 @@ func (f *Find) CommandMonitor(monitor *event.CommandMonitor) *Find { } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (f *Find) Crypt(crypt *driver.Crypt) *Find { +func (f *Find) Crypt(crypt driver.Crypt) *Find { if f == nil { f = new(Find) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.toml deleted file mode 100644 index dd52fd3db..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.toml +++ /dev/null @@ -1,105 +0,0 @@ -version = 0 -name = "Find" -documentation = "Find performs a find operation." -response.type = "batch cursor" - -[properties] -enabled = ["collection", "read concern", "read preference", "command monitor", "client session", "cluster clock"] -retryable = {mode = "once per command", type = "reads"} -legacy = "find" - -[command] -name = "find" -parameter = "collection" - -[request.filter] -type = "document" -constructor = true -documentation = "Filter determines what results are returned from find." - -[request.sort] -type = "document" -documentation = "Sort specifies the order in which to return results." - -[request.projection] -type = "document" -documentation = "Project limits the fields returned for all documents." - -[request.hint] -type = "value" -documentation = "Hint specifies the index to use." - -[request.skip] -type = "int64" -documentation = "Skip specifies the number of documents to skip before returning." - -[request.limit] -type = "int64" -documentation = "Limit sets a limit on the number of documents to return." - -[request.batchSize] -type = "int32" -documentation = "BatchSize specifies the number of documents to return in every batch." - -[request.singleBatch] -type = "boolean" -documentation = "SingleBatch specifies whether the results should be returned in a single batch." - -[request.comment] -type = "string" -documentation = "Comment sets a string to help trace an operation." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." - -[request.max] -type = "document" -documentation = "Max sets an exclusive upper bound for a specific index." - -[request.min] -type = "document" -documentation = "Min sets an inclusive lower bound for a specific index." - -[request.returnKey] -type = "boolean" -documentation = "ReturnKey when true returns index keys for all result documents." - -[request.showRecordID] -type = "boolean" -documentation = "ShowRecordID when true adds a $recordId field with the record identifier to returned documents." -keyName = "showRecordId" - -[request.oplogReplay] -type = "boolean" -documentation = "OplogReplay when true replays a replica set's oplog." - -[request.noCursorTimeout] -type = "boolean" -documentation = "NoCursorTimeout when true prevents cursor from timing out after an inactivity period." - -[request.tailable] -type = "boolean" -documentation = "Tailable keeps a cursor open and resumable after the last data has been retrieved." - -[request.awaitData] -type = "boolean" -documentation = "AwaitData when true makes a cursor block before returning when no data is available." - -[request.allowPartialResults] -type = "boolean" -documentation = "AllowPartialResults when true allows partial results to be returned if some shards are down." - -[request.allowDiskUse] -type = "boolean" -minWireVersionRequired = 4 -documentation = "AllowDiskUse when true allows temporary data to be written to disk during the find command." 
- -[request.collation] -type = "document" -minWireVersionRequired = 5 -documentation = "Collation specifies a collation to be used." - -[request.snapshot] -type = "boolean" -documentation = "Snapshot prevents the cursor from returning a document more than once because of an intervening write operation." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go index d1ad66f03..3e09ca440 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -45,13 +43,15 @@ type FindAndModify struct { selector description.ServerSelector writeConcern *writeconcern.WriteConcern retry *driver.RetryMode - crypt *driver.Crypt + crypt driver.Crypt hint bsoncore.Value serverAPI *driver.ServerAPIOptions + let bsoncore.Document result FindAndModifyResult } +// LastErrorObject represents information about updates and upserts returned by the server. type LastErrorObject struct { // True if an update modified an existing document UpdatedExisting bool @@ -59,6 +59,7 @@ type LastErrorObject struct { Upserted interface{} } +// FindAndModifyResult represents a findAndModify result returned by the server. type FindAndModifyResult struct { // Either the old or modified document, depending on the value of the new parameter. Value bsoncore.Document @@ -66,7 +67,7 @@ type FindAndModifyResult struct { LastErrorObject LastErrorObject } -func buildFindAndModifyResult(response bsoncore.Document, srvr driver.Server) (FindAndModifyResult, error) { +func buildFindAndModifyResult(response bsoncore.Document) (FindAndModifyResult, error) { elements, err := response.Elements() if err != nil { return FindAndModifyResult{}, err @@ -74,23 +75,23 @@ func buildFindAndModifyResult(response bsoncore.Document, srvr driver.Server) (F famr := FindAndModifyResult{} for _, element := range elements { switch element.Key() { - case "value": var ok bool famr.Value, ok = element.Value().DocumentOK() - if !ok { - err = fmt.Errorf("response field 'value' is type document, but received BSON type %s", element.Value().Type) + + // The 'value' field returned by a FindAndModify can be null in the case that no document was found. 
+ if element.Value().Type != bsontype.Null && !ok { + return famr, fmt.Errorf("response field 'value' is type document or null, but received BSON type %s", element.Value().Type) } case "lastErrorObject": valDoc, ok := element.Value().DocumentOK() if !ok { - err = fmt.Errorf("response field 'lastErrorObject' is type document, but received BSON type %s", element.Value().Type) - break + return famr, fmt.Errorf("response field 'lastErrorObject' is type document, but received BSON type %s", element.Value().Type) } var leo LastErrorObject if err = bson.Unmarshal(valDoc, &leo); err != nil { - break + return famr, err } famr.LastErrorObject = leo } @@ -111,12 +112,12 @@ func (fam *FindAndModify) Result() FindAndModifyResult { return fam.result } func (fam *FindAndModify) processResponse(info driver.ResponseInfo) error { var err error - fam.result, err = buildFindAndModifyResult(info.ServerResponse, info.Server) + fam.result, err = buildFindAndModifyResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (fam *FindAndModify) Execute(ctx context.Context) error { if fam.deployment == nil { return errors.New("the FindAndModify operation must have a Deployment set before Execute can be called") @@ -202,6 +203,9 @@ func (fam *FindAndModify) command(dst []byte, desc description.SelectedServer) ( } dst = bsoncore.AppendValueElement(dst, "hint", fam.hint) } + if fam.let != nil { + dst = bsoncore.AppendDocumentElement(dst, "let", fam.let) + } return dst, nil } @@ -410,7 +414,7 @@ func (fam *FindAndModify) Retry(retry driver.RetryMode) *FindAndModify { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (fam *FindAndModify) Crypt(crypt *driver.Crypt) *FindAndModify { +func (fam *FindAndModify) Crypt(crypt driver.Crypt) *FindAndModify { if fam == nil { fam = new(FindAndModify) } @@ -438,3 +442,13 @@ func (fam *FindAndModify) ServerAPI(serverAPI *driver.ServerAPIOptions) *FindAnd fam.serverAPI = serverAPI return fam } + +// Let specifies the let document to use. This option is only valid for server versions 5.0 and above. +func (fam *FindAndModify) Let(let bsoncore.Document) *FindAndModify { + if fam == nil { + fam = new(FindAndModify) + } + + fam.let = let + return fam +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.toml deleted file mode 100644 index 4f5edf260..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.toml +++ /dev/null @@ -1,73 +0,0 @@ -version = 0 -name = "FindAndModify" -documentation = "FindAndModify performs a findAndModify operation." - -[properties] -enabled = ["write concern"] -retryable = {mode = "once", type = "writes"} - -[command] -name = "findAndModify" -parameter = "collection" - -[request.query] -type = "document" -constructor = true -documentation = "Query specifies the selection criteria for the modification." - -[request.sort] -type = "document" -documentation = """ -Sort determines which document the operation modifies if the query matches multiple documents.\ -The first document matched by the sort order will be modified. -""" - -[request.remove] -type = "boolean" -documentation = "Remove specifies that the matched document should be removed. Defaults to false." 
- -[request.update] -type = "value" -documentation = "Update specifies the update document to perform on the matched document." - -[request.newDocument] -type = "boolean" -documentation = "NewDocument specifies whether to return the modified document or the original. Defaults to false (return original)." - -[request.fields] -type = "document" -documentation = "Fields specifies a subset of fields to return." - -[request.upsert] -type = "boolean" -documentation = "Upsert specifies whether or not to create a new document if no documents match the query when doing an update. Defaults to false." - -[request.bypassDocumentValidation] -type = "boolean" -documentation = "BypassDocumentValidation specifies if document validation can be skipped when executing the operation." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the operation to run." - -[request.collation] -type = "document" -minWireVersionRequired = 5 -documentation = "Collation specifies a collation to be used." - -[request.arrayFilters] -type = "array" -minWireVersionRequired = 6 -documentation = "ArrayFilters specifies an array of filter documents that determines which array elements to modify for an update operation on an array field." - -[request.hint] -type = "value" -minWireVersionRequired = 8 -documentation = "Hint specifies the index to use. This option is only valid for server versions >= 4.4. Server version 4.2 will error if this option is set. For server versions < 4.2, the driver will error if this option is set." - -[response] -name = "FindAndModifyResult" - -[response.field.value] -type = "document" -documentation = "Either the old or modified document, depending on the value of the new parameter." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/ismaster.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go similarity index 50% rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/ismaster.go rename to vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go index 544ed4e5c..5e75c08ee 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/ismaster.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go @@ -1,9 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2021-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + package operation import ( "context" "errors" - "fmt" "runtime" "strconv" @@ -17,8 +22,8 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) -// IsMaster is used to run the isMaster handshake operation. -type IsMaster struct { +// Hello is used to run the handshake operation. +type Hello struct { appname string compressors []string saslSupportedMechs string @@ -33,138 +38,97 @@ type IsMaster struct { res bsoncore.Document } -var _ driver.Handshaker = (*IsMaster)(nil) +var _ driver.Handshaker = (*Hello)(nil) -// NewIsMaster constructs an IsMaster. -func NewIsMaster() *IsMaster { return &IsMaster{} } +// NewHello constructs a Hello. +func NewHello() *Hello { return &Hello{} } // AppName sets the application name in the client metadata sent in this operation. 
-func (im *IsMaster) AppName(appname string) *IsMaster { - im.appname = appname - return im +func (h *Hello) AppName(appname string) *Hello { + h.appname = appname + return h } // ClusterClock sets the cluster clock for this operation. -func (im *IsMaster) ClusterClock(clock *session.ClusterClock) *IsMaster { - if im == nil { - im = new(IsMaster) +func (h *Hello) ClusterClock(clock *session.ClusterClock) *Hello { + if h == nil { + h = new(Hello) } - im.clock = clock - return im + h.clock = clock + return h } // Compressors sets the compressors that can be used. -func (im *IsMaster) Compressors(compressors []string) *IsMaster { - im.compressors = compressors - return im +func (h *Hello) Compressors(compressors []string) *Hello { + h.compressors = compressors + return h } // SASLSupportedMechs retrieves the supported SASL mechanism for the given user when this operation // is run. -func (im *IsMaster) SASLSupportedMechs(username string) *IsMaster { - im.saslSupportedMechs = username - return im +func (h *Hello) SASLSupportedMechs(username string) *Hello { + h.saslSupportedMechs = username + return h } // Deployment sets the Deployment for this operation. -func (im *IsMaster) Deployment(d driver.Deployment) *IsMaster { - im.d = d - return im +func (h *Hello) Deployment(d driver.Deployment) *Hello { + h.d = d + return h } // SpeculativeAuthenticate sets the document to be used for speculative authentication. -func (im *IsMaster) SpeculativeAuthenticate(doc bsoncore.Document) *IsMaster { - im.speculativeAuth = doc - return im +func (h *Hello) SpeculativeAuthenticate(doc bsoncore.Document) *Hello { + h.speculativeAuth = doc + return h } // TopologyVersion sets the TopologyVersion to be used for heartbeats. -func (im *IsMaster) TopologyVersion(tv *description.TopologyVersion) *IsMaster { - im.topologyVersion = tv - return im +func (h *Hello) TopologyVersion(tv *description.TopologyVersion) *Hello { + h.topologyVersion = tv + return h } -// MaxAwaitTimeMS sets the maximum time for the sever to wait for topology changes during a heartbeat. -func (im *IsMaster) MaxAwaitTimeMS(awaitTime int64) *IsMaster { - im.maxAwaitTimeMS = &awaitTime - return im +// MaxAwaitTimeMS sets the maximum time for the server to wait for topology changes during a heartbeat. +func (h *Hello) MaxAwaitTimeMS(awaitTime int64) *Hello { + h.maxAwaitTimeMS = &awaitTime + return h } // ServerAPI sets the server API version for this operation. -func (im *IsMaster) ServerAPI(serverAPI *driver.ServerAPIOptions) *IsMaster { - im.serverAPI = serverAPI - return im +func (h *Hello) ServerAPI(serverAPI *driver.ServerAPIOptions) *Hello { + h.serverAPI = serverAPI + return h } // LoadBalanced specifies whether or not this operation is being sent over a connection to a load balanced cluster. -func (im *IsMaster) LoadBalanced(lb bool) *IsMaster { - im.loadBalanced = lb - return im +func (h *Hello) LoadBalanced(lb bool) *Hello { + h.loadBalanced = lb + return h } // Result returns the result of executing this operation. 
-func (im *IsMaster) Result(addr address.Address) description.Server { - return description.NewServer(addr, bson.Raw(im.res)) -} - -func (im *IsMaster) decodeStringSlice(element bsoncore.Element, name string) ([]string, error) { - arr, ok := element.Value().ArrayOK() - if !ok { - return nil, fmt.Errorf("expected '%s' to be an array but it's a BSON %s", name, element.Value().Type) - } - vals, err := arr.Values() - if err != nil { - return nil, err - } - var strs []string - for _, val := range vals { - str, ok := val.StringValueOK() - if !ok { - return nil, fmt.Errorf("expected '%s' to be an array of strings, but found a BSON %s", name, val.Type) - } - strs = append(strs, str) - } - return strs, nil -} - -func (im *IsMaster) decodeStringMap(element bsoncore.Element, name string) (map[string]string, error) { - doc, ok := element.Value().DocumentOK() - if !ok { - return nil, fmt.Errorf("expected '%s' to be a document but it's a BSON %s", name, element.Value().Type) - } - elements, err := doc.Elements() - if err != nil { - return nil, err - } - m := make(map[string]string) - for _, element := range elements { - key := element.Key() - value, ok := element.Value().StringValueOK() - if !ok { - return nil, fmt.Errorf("expected '%s' to be a document of strings, but found a BSON %s", name, element.Value().Type) - } - m[key] = value - } - return m, nil +func (h *Hello) Result(addr address.Address) description.Server { + return description.NewServer(addr, bson.Raw(h.res)) } // handshakeCommand appends all necessary command fields as well as client metadata, SASL supported mechs, and compression. -func (im *IsMaster) handshakeCommand(dst []byte, desc description.SelectedServer) ([]byte, error) { - dst, err := im.command(dst, desc) +func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([]byte, error) { + dst, err := h.command(dst, desc) if err != nil { return dst, err } - if im.saslSupportedMechs != "" { - dst = bsoncore.AppendStringElement(dst, "saslSupportedMechs", im.saslSupportedMechs) + if h.saslSupportedMechs != "" { + dst = bsoncore.AppendStringElement(dst, "saslSupportedMechs", h.saslSupportedMechs) } - if im.speculativeAuth != nil { - dst = bsoncore.AppendDocumentElement(dst, "speculativeAuthenticate", im.speculativeAuth) + if h.speculativeAuth != nil { + dst = bsoncore.AppendDocumentElement(dst, "speculativeAuthenticate", h.speculativeAuth) } var idx int32 idx, dst = bsoncore.AppendArrayElementStart(dst, "compression") - for i, compressor := range im.compressors { + for i, compressor := range h.compressors { dst = bsoncore.AppendStringElement(dst, strconv.Itoa(i), compressor) } dst, _ = bsoncore.AppendArrayEnd(dst, idx) @@ -183,9 +147,9 @@ func (im *IsMaster) handshakeCommand(dst []byte, desc description.SelectedServer dst, _ = bsoncore.AppendDocumentEnd(dst, didx) dst = bsoncore.AppendStringElement(dst, "platform", runtime.Version()) - if im.appname != "" { + if h.appname != "" { didx, dst = bsoncore.AppendDocumentElementStart(dst, "application") - dst = bsoncore.AppendStringElement(dst, "name", im.appname) + dst = bsoncore.AppendStringElement(dst, "name", h.appname) dst, _ = bsoncore.AppendDocumentEnd(dst, didx) } dst, _ = bsoncore.AppendDocumentEnd(dst, idx) @@ -194,14 +158,17 @@ func (im *IsMaster) handshakeCommand(dst []byte, desc description.SelectedServer } // command appends all necessary command fields. 
-func (im *IsMaster) command(dst []byte, _ description.SelectedServer) ([]byte, error) { - if im.serverAPI != nil { +func (h *Hello) command(dst []byte, desc description.SelectedServer) ([]byte, error) { + // Use "hello" if topology is LoadBalanced, API version is declared or server + // has responded with "helloOk". Otherwise, use legacy hello. + if desc.Kind == description.LoadBalanced || h.serverAPI != nil || desc.Server.HelloOK { dst = bsoncore.AppendInt32Element(dst, "hello", 1) } else { - dst = bsoncore.AppendInt32Element(dst, "isMaster", 1) + dst = bsoncore.AppendInt32Element(dst, internal.LegacyHello, 1) } + dst = bsoncore.AppendBooleanElement(dst, "helloOk", true) - if tv := im.topologyVersion; tv != nil { + if tv := h.topologyVersion; tv != nil { var tvIdx int32 tvIdx, dst = bsoncore.AppendDocumentElementStart(dst, "topologyVersion") @@ -209,10 +176,10 @@ func (im *IsMaster) command(dst []byte, _ description.SelectedServer) ([]byte, e dst = bsoncore.AppendInt64Element(dst, "counter", tv.Counter) dst, _ = bsoncore.AppendDocumentEnd(dst, tvIdx) } - if im.maxAwaitTimeMS != nil { - dst = bsoncore.AppendInt64Element(dst, "maxAwaitTimeMS", *im.maxAwaitTimeMS) + if h.maxAwaitTimeMS != nil { + dst = bsoncore.AppendInt64Element(dst, "maxAwaitTimeMS", *h.maxAwaitTimeMS) } - if im.loadBalanced { + if h.loadBalanced { // The loadBalanced parameter should only be added if it's true. We should never explicitly send // loadBalanced=false per the load balancing spec. dst = bsoncore.AppendBooleanElement(dst, "loadBalanced", true) @@ -222,67 +189,70 @@ func (im *IsMaster) command(dst []byte, _ description.SelectedServer) ([]byte, e } // Execute runs this operation. -func (im *IsMaster) Execute(ctx context.Context) error { - if im.d == nil { - return errors.New("an IsMaster must have a Deployment set before Execute can be called") +func (h *Hello) Execute(ctx context.Context) error { + if h.d == nil { + return errors.New("a Hello must have a Deployment set before Execute can be called") } - return im.createOperation().Execute(ctx, nil) + return h.createOperation().Execute(ctx, nil) } -// StreamResponse gets the next streaming isMaster response from the server. -func (im *IsMaster) StreamResponse(ctx context.Context, conn driver.StreamerConnection) error { - return im.createOperation().ExecuteExhaust(ctx, conn, nil) +// StreamResponse gets the next streaming Hello response from the server. +func (h *Hello) StreamResponse(ctx context.Context, conn driver.StreamerConnection) error { + return h.createOperation().ExecuteExhaust(ctx, conn, nil) } -func (im *IsMaster) createOperation() driver.Operation { +func (h *Hello) createOperation() driver.Operation { return driver.Operation{ - Clock: im.clock, - CommandFn: im.command, + Clock: h.clock, + CommandFn: h.command, Database: "admin", - Deployment: im.d, + Deployment: h.d, ProcessResponseFn: func(info driver.ResponseInfo) error { - im.res = info.ServerResponse + h.res = info.ServerResponse return nil }, - ServerAPI: im.serverAPI, + ServerAPI: h.serverAPI, } } // GetHandshakeInformation performs the MongoDB handshake for the provided connection and returns the relevant // information about the server. This function implements the driver.Handshaker interface. 
-func (im *IsMaster) GetHandshakeInformation(ctx context.Context, _ address.Address, c driver.Connection) (driver.HandshakeInformation, error) { +func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, c driver.Connection) (driver.HandshakeInformation, error) { err := driver.Operation{ - Clock: im.clock, - CommandFn: im.handshakeCommand, + Clock: h.clock, + CommandFn: h.handshakeCommand, Deployment: driver.SingleConnectionDeployment{c}, Database: "admin", ProcessResponseFn: func(info driver.ResponseInfo) error { - im.res = info.ServerResponse + h.res = info.ServerResponse return nil }, - ServerAPI: im.serverAPI, + ServerAPI: h.serverAPI, }.Execute(ctx, nil) if err != nil { return driver.HandshakeInformation{}, err } info := driver.HandshakeInformation{ - Description: im.Result(c.Address()), + Description: h.Result(c.Address()), } - if speculativeAuthenticate, ok := im.res.Lookup("speculativeAuthenticate").DocumentOK(); ok { + if speculativeAuthenticate, ok := h.res.Lookup("speculativeAuthenticate").DocumentOK(); ok { info.SpeculativeAuthenticate = speculativeAuthenticate } + if serverConnectionID, ok := h.res.Lookup("connectionId").Int32OK(); ok { + info.ServerConnectionID = &serverConnectionID + } // Cast to bson.Raw to lookup saslSupportedMechs to avoid converting from bsoncore.Value to bson.RawValue for the // StringSliceFromRawValue call. - if saslSupportedMechs, lookupErr := bson.Raw(im.res).LookupErr("saslSupportedMechs"); lookupErr == nil { + if saslSupportedMechs, lookupErr := bson.Raw(h.res).LookupErr("saslSupportedMechs"); lookupErr == nil { info.SaslSupportedMechs, err = internal.StringSliceFromRawValue("saslSupportedMechs", saslSupportedMechs) } return info, err } // FinishHandshake implements the Handshaker interface. This is a no-op function because a non-authenticated connection -// does not do anything besides the initial isMaster for a handshake. -func (im *IsMaster) FinishHandshake(context.Context, driver.Connection) error { +// does not do anything besides the initial Hello for a handshake. +func (h *Hello) FinishHandshake(context.Context, driver.Connection) error { return nil } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go index b4d09537b..993eac101 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -30,7 +28,7 @@ type Insert struct { clock *session.ClusterClock collection string monitor *event.CommandMonitor - crypt *driver.Crypt + crypt driver.Crypt database string deployment driver.Deployment selector description.ServerSelector @@ -40,12 +38,13 @@ type Insert struct { serverAPI *driver.ServerAPIOptions } +// InsertResult represents an insert result returned by the server. type InsertResult struct { // Number of documents successfully inserted. 
- N int32 + N int64 } -func buildInsertResult(response bsoncore.Document, srvr driver.Server) (InsertResult, error) { +func buildInsertResult(response bsoncore.Document) (InsertResult, error) { elements, err := response.Elements() if err != nil { return InsertResult{}, err @@ -55,9 +54,9 @@ func buildInsertResult(response bsoncore.Document, srvr driver.Server) (InsertRe switch element.Key() { case "n": var ok bool - ir.N, ok = element.Value().AsInt32OK() + ir.N, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type) + return ir, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type) } } } @@ -75,12 +74,12 @@ func NewInsert(documents ...bsoncore.Document) *Insert { func (i *Insert) Result() InsertResult { return i.result } func (i *Insert) processResponse(info driver.ResponseInfo) error { - ir, err := buildInsertResult(info.ServerResponse, info.Server) + ir, err := buildInsertResult(info.ServerResponse) i.result.N += ir.N return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (i *Insert) Execute(ctx context.Context) error { if i.deployment == nil { return errors.New("the Insert operation must have a Deployment set before Execute can be called") @@ -195,7 +194,7 @@ func (i *Insert) CommandMonitor(monitor *event.CommandMonitor) *Insert { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (i *Insert) Crypt(crypt *driver.Crypt) *Insert { +func (i *Insert) Crypt(crypt driver.Crypt) *Insert { if i == nil { i = new(Insert) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.toml deleted file mode 100644 index 24fdd51c9..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.toml +++ /dev/null @@ -1,45 +0,0 @@ -version = 0 -name = "Insert" -documentation = "Insert performs an insert operation." - -[properties] -enabled = ["write concern"] -retryable = {mode = "once per command", type = "writes"} -batches = "documents" - -[command] -name = "insert" -parameter = "collection" - -[request.documents] -type = "document" -slice = true -constructor = true -variadic = true -required = true -documentation = """ -Documents adds documents to this operation that will be inserted when this operation is -executed.\ -""" - -[request.ordered] -type = "boolean" -documentation = """ -Ordered sets ordered. If true, when a write fails, the operation will return the error, when -false write failures do not stop execution of the operation.\ -""" - -[request.bypassDocumentValidation] -type = "boolean" -minWireVersion = 4 -documentation = """ -BypassDocumentValidation allows the operation to opt-out of document level validation. Valid -for server versions >= 3.2. For servers < 3.2, this setting is ignored.\ -""" - -[response] -name = "InsertResult" - -[response.field.n] -type = "int32" -documentation = "Number of documents successfully inserted." 
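For context on the insert changes above (the srvr parameter dropped from buildInsertResult and InsertResult.N widened to int64), here is a minimal sketch of driving the Insert operation through the builders in this package. It is not part of the patch; the Database, Collection and Deployment setters are assumed from the package's usual builder pattern and are not shown in this hunk.

// Sketch only: runs an insert via x/mongo/driver/operation and reads the
// widened int64 count from InsertResult. Assumes dep is a ready Deployment.
package example

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	"go.mongodb.org/mongo-driver/x/mongo/driver"
	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
)

func insertOne(ctx context.Context, dep driver.Deployment) error {
	doc := bsoncore.NewDocumentBuilder().
		AppendString("name", "example").
		Build()

	op := operation.NewInsert(doc). // variadic constructor shown in this diff
		Database("test").    // assumed setter
		Collection("items"). // assumed setter
		Deployment(dep)

	if err := op.Execute(ctx); err != nil {
		return err
	}

	// Result().N is int64 after this update (previously int32).
	fmt.Printf("inserted %d document(s)\n", op.Result().N)
	return nil
}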
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go index 067c84aea..6b469b6fb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -35,12 +33,13 @@ type ListDatabases struct { readPreference *readpref.ReadPref retry *driver.RetryMode selector description.ServerSelector - crypt *driver.Crypt + crypt driver.Crypt serverAPI *driver.ServerAPIOptions result ListDatabasesResult } +// ListDatabasesResult represents a listDatabases result returned by the server. type ListDatabasesResult struct { // An array of documents, one document for each database Databases []databaseRecord @@ -54,7 +53,7 @@ type databaseRecord struct { Empty bool } -func buildListDatabasesResult(response bsoncore.Document, srvr driver.Server) (ListDatabasesResult, error) { +func buildListDatabasesResult(response bsoncore.Document) (ListDatabasesResult, error) { elements, err := response.Elements() if err != nil { return ListDatabasesResult{}, err @@ -62,76 +61,64 @@ func buildListDatabasesResult(response bsoncore.Document, srvr driver.Server) (L ir := ListDatabasesResult{} for _, element := range elements { switch element.Key() { - case "totalSize": var ok bool ir.TotalSize, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'totalSize' is type int64, but received BSON type %s: %s", element.Value().Type, element.Value()) + return ir, fmt.Errorf("response field 'totalSize' is type int64, but received BSON type %s: %s", element.Value().Type, element.Value()) } - case "databases": - // TODO: Make operationgen handle array results. 
arr, ok := element.Value().ArrayOK() if !ok { - err = fmt.Errorf("response field 'databases' is type array, but received BSON type %s", element.Value().Type) - continue + return ir, fmt.Errorf("response field 'databases' is type array, but received BSON type %s", element.Value().Type) } var tmp bsoncore.Document - marshalErr := bson.Unmarshal(arr, &tmp) - if marshalErr != nil { - err = marshalErr - continue + err := bson.Unmarshal(arr, &tmp) + if err != nil { + return ir, err } - records, marshalErr := tmp.Elements() - if marshalErr != nil { - err = marshalErr - continue + + records, err := tmp.Elements() + if err != nil { + return ir, err } ir.Databases = make([]databaseRecord, len(records)) for i, val := range records { valueDoc, ok := val.Value().DocumentOK() if !ok { - err = fmt.Errorf("'databases' element is type document, but received BSON type %s", val.Value().Type) - continue + return ir, fmt.Errorf("'databases' element is type document, but received BSON type %s", val.Value().Type) } - elems, marshalErr := valueDoc.Elements() - if marshalErr != nil { - err = marshalErr - continue + elems, err := valueDoc.Elements() + if err != nil { + return ir, err } + for _, elem := range elems { switch elem.Key() { - case "name": ir.Databases[i].Name, ok = elem.Value().StringValueOK() if !ok { - err = fmt.Errorf("response field 'name' is type string, but received BSON type %s", elem.Value().Type) - continue + return ir, fmt.Errorf("response field 'name' is type string, but received BSON type %s", elem.Value().Type) } - case "sizeOnDisk": ir.Databases[i].SizeOnDisk, ok = elem.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'sizeOnDisk' is type int64, but received BSON type %s", elem.Value().Type) - continue + return ir, fmt.Errorf("response field 'sizeOnDisk' is type int64, but received BSON type %s", elem.Value().Type) } - case "empty": ir.Databases[i].Empty, ok = elem.Value().BooleanOK() if !ok { - err = fmt.Errorf("response field 'empty' is type bool, but received BSON type %s", elem.Value().Type) - continue + return ir, fmt.Errorf("response field 'empty' is type bool, but received BSON type %s", elem.Value().Type) } } } } } } - return ir, err + return ir, nil } // NewListDatabases constructs and returns a new ListDatabases. @@ -147,12 +134,12 @@ func (ld *ListDatabases) Result() ListDatabasesResult { return ld.result } func (ld *ListDatabases) processResponse(info driver.ResponseInfo) error { var err error - ld.result, err = buildListDatabasesResult(info.ServerResponse, info.Server) + ld.result, err = buildListDatabasesResult(info.ServerResponse) return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (ld *ListDatabases) Execute(ctx context.Context) error { if ld.deployment == nil { return errors.New("the ListDatabases operation must have a Deployment set before Execute can be called") @@ -307,7 +294,7 @@ func (ld *ListDatabases) Retry(retry driver.RetryMode) *ListDatabases { } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (ld *ListDatabases) Crypt(crypt *driver.Crypt) *ListDatabases { +func (ld *ListDatabases) Crypt(crypt driver.Crypt) *ListDatabases { if ld == nil { ld = new(ListDatabases) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.toml deleted file mode 100644 index 29d5a0218..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.toml +++ /dev/null @@ -1,37 +0,0 @@ -version = 0 -name = "ListDatabases" -documentation = "ListDatabases performs a listDatabases operation." - -[properties] -enabled = ["read preference"] -retryable = {mode = "once per command", type = "reads"} -disabled = ["collection"] - -[command] -name = "listDatabases" -parameter = "database" - -[request.filter] -type = "document" -constructor = true -documentation = "Filter determines what results are returned from listDatabases." - - -[request.nameOnly] -type = "boolean" -documentation = "NameOnly specifies whether to only return database names." - -[request.authorizedDatabases] -type = "boolean" -documentation = "AuthorizedDatabases specifies whether to only return databases which the user is authorized to use." - -[response] -name = "ListDatabasesResult" - -[response.field.totalSize] -type = "int64" -documentation = "The sum of the size of all the database files on disk in bytes." - -[response.field.databases] -type = "value" -documentation = "An array of documents, one document for each database" diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go index 8f96eb137..de30afd3d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. - package operation import ( @@ -22,20 +20,21 @@ import ( // ListCollections performs a listCollections operation. type ListCollections struct { - filter bsoncore.Document - nameOnly *bool - session *session.Client - clock *session.ClusterClock - monitor *event.CommandMonitor - crypt *driver.Crypt - database string - deployment driver.Deployment - readPreference *readpref.ReadPref - selector description.ServerSelector - retry *driver.RetryMode - result driver.CursorResponse - batchSize *int32 - serverAPI *driver.ServerAPIOptions + filter bsoncore.Document + nameOnly *bool + authorizedCollections *bool + session *session.Client + clock *session.ClusterClock + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + readPreference *readpref.ReadPref + selector description.ServerSelector + retry *driver.RetryMode + result driver.CursorResponse + batchSize *int32 + serverAPI *driver.ServerAPIOptions } // NewListCollections constructs and returns a new ListCollections. @@ -65,7 +64,7 @@ func (lc *ListCollections) processResponse(info driver.ResponseInfo) error { return err } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. 
func (lc *ListCollections) Execute(ctx context.Context) error { if lc.deployment == nil { return errors.New("the ListCollections operation must have a Deployment set before Execute can be called") @@ -91,7 +90,6 @@ func (lc *ListCollections) Execute(ctx context.Context) error { } func (lc *ListCollections) command(dst []byte, desc description.SelectedServer) ([]byte, error) { - dst = bsoncore.AppendInt32Element(dst, "listCollections", 1) if lc.filter != nil { dst = bsoncore.AppendDocumentElement(dst, "filter", lc.filter) @@ -99,6 +97,10 @@ func (lc *ListCollections) command(dst []byte, desc description.SelectedServer) if lc.nameOnly != nil { dst = bsoncore.AppendBooleanElement(dst, "nameOnly", *lc.nameOnly) } + if lc.authorizedCollections != nil { + dst = bsoncore.AppendBooleanElement(dst, "authorizedCollections", *lc.authorizedCollections) + } + cursorDoc := bsoncore.NewDocumentBuilder() if lc.batchSize != nil { cursorDoc.AppendInt32("batchSize", *lc.batchSize) @@ -128,6 +130,17 @@ func (lc *ListCollections) NameOnly(nameOnly bool) *ListCollections { return lc } +// AuthorizedCollections specifies whether to only return collections the user +// is authorized to use. +func (lc *ListCollections) AuthorizedCollections(authorizedCollections bool) *ListCollections { + if lc == nil { + lc = new(ListCollections) + } + + lc.authorizedCollections = &authorizedCollections + return lc +} + // Session sets the session for this operation. func (lc *ListCollections) Session(session *session.Client) *ListCollections { if lc == nil { @@ -159,7 +172,7 @@ func (lc *ListCollections) CommandMonitor(monitor *event.CommandMonitor) *ListCo } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (lc *ListCollections) Crypt(crypt *driver.Crypt) *ListCollections { +func (lc *ListCollections) Crypt(crypt driver.Crypt) *ListCollections { if lc == nil { lc = new(ListCollections) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.toml deleted file mode 100644 index 383dae11f..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.toml +++ /dev/null @@ -1,23 +0,0 @@ -version = 0 -name = "ListCollections" -documentation = "ListCollections performs a listCollections operation." -response.type = "list collections batch cursor" - -[properties] -enabled = ["read preference"] -disabled = ["collection"] -retryable = {mode = "once per command", type = "reads"} -legacy = "listCollections" - -[command] -name = "listCollections" -parameter = "database" - -[request.filter] -type = "document" -constructor = true -documentation = "Filter determines what results are returned from listCollections." - -[request.nameOnly] -type = "boolean" -documentation = "NameOnly specifies whether to only return collection names." diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go index 33db54e45..171e2b5a3 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Code generated by operationgen. DO NOT EDIT. 
- package operation import ( @@ -31,7 +29,7 @@ type ListIndexes struct { deployment driver.Deployment selector description.ServerSelector retry *driver.RetryMode - crypt *driver.Crypt + crypt driver.Crypt serverAPI *driver.ServerAPIOptions result driver.CursorResponse @@ -60,7 +58,7 @@ func (li *ListIndexes) processResponse(info driver.ResponseInfo) error { } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (li *ListIndexes) Execute(ctx context.Context) error { if li.deployment == nil { return errors.New("the ListIndexes operation must have a Deployment set before Execute can be called") @@ -205,7 +203,7 @@ func (li *ListIndexes) Retry(retry driver.RetryMode) *ListIndexes { } // Crypt sets the Crypt object to use for automatic encryption and decryption. -func (li *ListIndexes) Crypt(crypt *driver.Crypt) *ListIndexes { +func (li *ListIndexes) Crypt(crypt driver.Crypt) *ListIndexes { if li == nil { li = new(ListIndexes) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.toml deleted file mode 100644 index 98d0f8862..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.toml +++ /dev/null @@ -1,20 +0,0 @@ -version = 0 -name = "ListIndexes" -documentation = "ListIndexes performs a listIndexes operation." -response.type = "batch cursor" - -[properties] -legacy = "listIndexes" -retryable = {mode = "once per command", type = "reads"} - -[command] -name = "listIndexes" -parameter = "collection" - -[request.batchSize] -type = "int32" -documentation = "BatchSize specifies the number of documents to return in every batch." - -[request.maxTimeMS] -type = "int64" -documentation = "MaxTimeMS specifies the maximum amount of time to allow the query to run." 
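A short sketch of the AuthorizedCollections option added to ListCollections above, combined with the existing NameOnly setter. The Database and Deployment setters, and passing a nil filter to NewListCollections, are assumptions based on the surrounding package conventions rather than part of this hunk.

// Sketch only: requests collection names the connected user is authorized to
// use, via the new AuthorizedCollections option.
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/x/mongo/driver"
	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
)

func listAuthorizedCollectionNames(ctx context.Context, dep driver.Deployment) error {
	op := operation.NewListCollections(nil). // nil filter: only appended when non-nil
		Database("test").            // assumed setter
		NameOnly(true).              // existing option
		AuthorizedCollections(true). // new option from this diff
		Deployment(dep)              // assumed setter

	// Execute populates the cursor response; consuming the cursor is omitted here.
	return op.Execute(ctx)
}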
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/operation.go deleted file mode 100644 index 3a13b504b..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/operation.go +++ /dev/null @@ -1,15 +0,0 @@ -package operation - -//go:generate operationgen insert.toml operation insert.go -//go:generate operationgen find.toml operation find.go -//go:generate operationgen list_collections.toml operation list_collections.go -//go:generate operationgen createIndexes.toml operation createIndexes.go -//go:generate operationgen drop_collection.toml operation drop_collection.go -//go:generate operationgen distinct.toml operation distinct.go -//go:generate operationgen drop_indexes.toml operation drop_indexes.go -//go:generate operationgen drop_database.toml operation drop_database.go -//go:generate operationgen commit_transaction.toml operation commit_transaction.go -//go:generate operationgen abort_transaction.toml operation abort_transaction.go -//go:generate operationgen count.toml operation count.go -//go:generate operationgen end_sessions.toml operation end_sessions.go -//go:generate operationgen create.toml operation create.go diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go index 0a3a9e6d0..3cd11848d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go @@ -4,8 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// NOTE: This file is maintained by hand because operationgen cannot generate it. - package operation import ( @@ -39,8 +37,9 @@ type Update struct { writeConcern *writeconcern.WriteConcern retry *driver.RetryMode result UpdateResult - crypt *driver.Crypt + crypt driver.Crypt serverAPI *driver.ServerAPIOptions + let bsoncore.Document } // Upsert contains the information for an upsert in an Update operation. @@ -52,14 +51,14 @@ type Upsert struct { // UpdateResult contains information for the result of an Update operation. type UpdateResult struct { // Number of documents matched. - N int32 + N int64 // Number of documents modified. - NModified int32 + NModified int64 // Information about upserted documents. 
Upserted []Upsert } -func buildUpdateResult(response bsoncore.Document, srvr driver.Server) (UpdateResult, error) { +func buildUpdateResult(response bsoncore.Document) (UpdateResult, error) { elements, err := response.Elements() if err != nil { return UpdateResult{}, err @@ -67,26 +66,22 @@ func buildUpdateResult(response bsoncore.Document, srvr driver.Server) (UpdateRe ur := UpdateResult{} for _, element := range elements { switch element.Key() { - case "nModified": var ok bool - ur.NModified, ok = element.Value().Int32OK() + ur.NModified, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'nModified' is type int32, but received BSON type %s", element.Value().Type) + return ur, fmt.Errorf("response field 'nModified' is type int32 or int64, but received BSON type %s", element.Value().Type) } - case "n": var ok bool - ur.N, ok = element.Value().Int32OK() + ur.N, ok = element.Value().AsInt64OK() if !ok { - err = fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type) + return ur, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type) } - case "upserted": arr, ok := element.Value().ArrayOK() if !ok { - err = fmt.Errorf("response field 'upserted' is type array, but received BSON type %s", element.Value().Type) - break + return ur, fmt.Errorf("response field 'upserted' is type array, but received BSON type %s", element.Value().Type) } var values []bsoncore.Value @@ -98,12 +93,11 @@ func buildUpdateResult(response bsoncore.Document, srvr driver.Server) (UpdateRe for _, val := range values { valDoc, ok := val.DocumentOK() if !ok { - err = fmt.Errorf("upserted value is type document, but received BSON type %s", val.Type) - break + return ur, fmt.Errorf("upserted value is type document, but received BSON type %s", val.Type) } var upsert Upsert if err = bson.Unmarshal(valDoc, &upsert); err != nil { - break + return ur, err } ur.Upserted = append(ur.Upserted, upsert) } @@ -123,7 +117,7 @@ func NewUpdate(updates ...bsoncore.Document) *Update { func (u *Update) Result() UpdateResult { return u.result } func (u *Update) processResponse(info driver.ResponseInfo) error { - ur, err := buildUpdateResult(info.ServerResponse, info.Server) + ur, err := buildUpdateResult(info.ServerResponse) u.result.N += ur.N u.result.NModified += ur.NModified @@ -137,7 +131,7 @@ func (u *Update) processResponse(info driver.ResponseInfo) error { } -// Execute runs this operations and returns an error if the operaiton did not execute successfully. +// Execute runs this operations and returns an error if the operation did not execute successfully. func (u *Update) Execute(ctx context.Context) error { if u.deployment == nil { return errors.New("the Update operation must have a Deployment set before Execute can be called") @@ -192,6 +186,9 @@ func (u *Update) command(dst []byte, desc description.SelectedServer) ([]byte, e return nil, errors.New("the 'arrayFilters' command parameter requires a minimum server wire version of 6") } } + if u.let != nil { + dst = bsoncore.AppendDocumentElement(dst, "let", u.let) + } return dst, nil } @@ -346,7 +343,7 @@ func (u *Update) Retry(retry driver.RetryMode) *Update { } // Crypt sets the Crypt object to use for automatic encryption and decryption. 
-func (u *Update) Crypt(crypt *driver.Crypt) *Update { +func (u *Update) Crypt(crypt driver.Crypt) *Update { if u == nil { u = new(Update) } @@ -364,3 +361,13 @@ func (u *Update) ServerAPI(serverAPI *driver.ServerAPIOptions) *Update { u.serverAPI = serverAPI return u } + +// Let specifies the let document to use. This option is only valid for server versions 5.0 and above. +func (u *Update) Let(let bsoncore.Document) *Update { + if u == nil { + u = new(Update) + } + + u.let = let + return u +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.toml b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.toml deleted file mode 100644 index 6690c213e..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.toml +++ /dev/null @@ -1,58 +0,0 @@ -version = 0 -name = "Update" -documentation = "Update performs an update operation." - -[properties] -enabled = ["write concern"] -retryable = {mode = "once per command", type = "writes"} -batches = "updates" - -[command] -name = "update" -parameter = "collection" - -[request.updates] -type = "document" -slice = true -constructor = true -variadic = true -required = true -documentation = """ -Updates specifies an array of update statements to perform when this operation is executed. -Each update document must have the following structure: {q: , u: , multi: , collation: Optional, arrayFitlers: Optional, hint: Optional}.\ -""" - -[request.ordered] -type = "boolean" -documentation = """ -Ordered sets ordered. If true, when a write fails, the operation will return the error, when -false write failures do not stop execution of the operation.\ -""" - -[request.bypassDocumentValidation] -type = "boolean" -minWireVersion = 4 -documentation = """ -BypassDocumentValidation allows the operation to opt-out of document level validation. Valid -for server versions >= 3.2. For servers < 3.2, this setting is ignored.\ -""" - -[request.hint] -type = "boolean" -minWireVersionRequired = 5 -documentation = """ -Hint is a flag to indicate that the update document contains a hint. Hint is only supported by -servers >= 4.2. Older servers >= 3.4 will report an error for using the hint option. For servers < -3.4, the driver will return an error if the hint option is used.\ -""" - -[response] -name = "UpdateResult" - -[response.field.n] -type = "int32" -documentation = "Number of documents matched." - -[response.field.nModified] -type = "int32" -documentation = "Number of documents modified." 
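A sketch of the new Update.Let option and the widened int64 counters in UpdateResult shown above. The {q, u, multi} statement shape follows the documentation in the removed update.toml; the Database, Collection and Deployment setters are assumed from the package's builder pattern and are not part of this hunk.

// Sketch only: one update statement with the {q, u, multi} shape, the new Let
// option, and the int64 result counters. Assumes dep is a ready Deployment.
package example

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	"go.mongodb.org/mongo-driver/x/mongo/driver"
	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
)

func markReviewed(ctx context.Context, dep driver.Deployment) error {
	filter := bsoncore.NewDocumentBuilder().AppendString("status", "active").Build()
	change := bsoncore.NewDocumentBuilder().
		AppendDocument("$set", bsoncore.NewDocumentBuilder().AppendBoolean("reviewed", true).Build()).
		Build()

	// Update statement: {q: <filter>, u: <change>, multi: true}.
	stmt := bsoncore.NewDocumentBuilder().
		AppendDocument("q", filter).
		AppendDocument("u", change).
		AppendBoolean("multi", true).
		Build()

	letDoc := bsoncore.NewDocumentBuilder().AppendInt32("minQty", 5).Build()

	op := operation.NewUpdate(stmt).
		Database("test").    // assumed setter
		Collection("items"). // assumed setter
		Let(letDoc).         // only valid for server 5.0+, per the new doc comment
		Deployment(dep)      // assumed setter

	if err := op.Execute(ctx); err != nil {
		return err
	}

	// N and NModified are int64 after this update (previously int32).
	fmt.Printf("matched=%d modified=%d\n", op.Result().N, op.Result().NModified)
	return nil
}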
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_legacy.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_legacy.go index a4390d050..2584f484a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_legacy.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_legacy.go @@ -97,7 +97,7 @@ func (op Operation) createLegacyFindWireMessage(dst []byte, desc description.Sel // build options as a byte slice of elements rather than a bsoncore.Document because they will be appended // to another document with $query var optsElems []byte - flags := op.slaveOK(desc) + flags := op.secondaryOK(desc) var numToSkip, numToReturn, batchSize, limit int32 // numToReturn calculated from batchSize and limit var filter, returnFieldsSelector bsoncore.Document var collName string @@ -162,7 +162,7 @@ func (op Operation) createLegacyFindWireMessage(dst []byte, desc description.Sel numToReturn = op.calculateNumberToReturn(limit, batchSize) // add read preference if needed - rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, true) + rp, err := op.createReadPref(desc, true) if err != nil { return dst, info, "", err } @@ -451,7 +451,7 @@ func (op Operation) createLegacyListCollectionsWiremessage(dst []byte, desc desc if err != nil { return dst, info, "", err } - rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, true) + rp, err := op.createReadPref(desc, true) if err != nil { return dst, info, "", err } @@ -466,7 +466,7 @@ func (op Operation) createLegacyListCollectionsWiremessage(dst []byte, desc desc var wmIdx int32 wmIdx, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery) - dst = wiremessage.AppendQueryFlags(dst, op.slaveOK(desc)) + dst = wiremessage.AppendQueryFlags(dst, op.secondaryOK(desc)) dst = wiremessage.AppendQueryFullCollectionName(dst, op.getFullCollectionName(listCollectionsNamespace)) dst = wiremessage.AppendQueryNumberToSkip(dst, 0) dst = wiremessage.AppendQueryNumberToReturn(dst, batchSize) @@ -607,7 +607,7 @@ func (op Operation) createLegacyListIndexesWiremessage(dst []byte, desc descript filter = bsoncore.AppendStringElement(filter, "ns", op.getFullCollectionName(filterCollName)) filter, _ = bsoncore.AppendDocumentEnd(filter, fidx) - rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, true) + rp, err := op.createReadPref(desc, true) if err != nil { return dst, info, "", err } @@ -617,7 +617,7 @@ func (op Operation) createLegacyListIndexesWiremessage(dst []byte, desc descript var wmIdx int32 wmIdx, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery) - dst = wiremessage.AppendQueryFlags(dst, op.slaveOK(desc)) + dst = wiremessage.AppendQueryFlags(dst, op.secondaryOK(desc)) dst = wiremessage.AppendQueryFullCollectionName(dst, op.getFullCollectionName(listIndexesNamespace)) dst = wiremessage.AppendQueryNumberToSkip(dst, 0) dst = wiremessage.AppendQueryNumberToReturn(dst, batchSize) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go index 64adeb71e..9e01f9fe5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go @@ -40,7 +40,7 @@ var ErrAbortTwice = errors.New("cannot call abortTransaction twice") // ErrCommitAfterAbort is returned if commit is called after an abort. 
var ErrCommitAfterAbort = errors.New("cannot call commitTransaction after calling abortTransaction") -// ErrUnackWCUnsupported is returned if an unacknowledged write concern is supported for a transaciton. +// ErrUnackWCUnsupported is returned if an unacknowledged write concern is supported for a transaction. var ErrUnackWCUnsupported = errors.New("transactions do not support unacknowledged write concerns") // ErrSnapshotTransaction is returned if an transaction is started on a snapshot session. @@ -95,6 +95,7 @@ type LoadBalancedTransactionConnection interface { Description() description.Server Close() error ID() string + ServerConnectionID() *int32 Address() address.Address Stale() bool @@ -345,8 +346,6 @@ func (c *Client) EndSession() { c.Terminated = true c.pool.ReturnSession(c.Server) - - return } // TransactionInProgress returns true if the client session is in an active transaction. @@ -365,7 +364,7 @@ func (c *Client) TransactionRunning() bool { return c != nil && (c.TransactionState == Starting || c.TransactionState == InProgress) } -// TransactionCommitted returns true of the client session just committed a transaciton. +// TransactionCommitted returns true of the client session just committed a transaction. func (c *Client) TransactionCommitted() bool { return c.TransactionState == Committed } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go index e7881fa17..54152ea0f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go @@ -9,15 +9,11 @@ package session import ( "time" - "crypto/rand" - "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" ) -var rander = rand.Reader - // Server is an open session with the server. type Server struct { SessionID bsoncore.Document diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index 4255a7e2a..c91a1b5ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -18,7 +18,6 @@ import ( "sync/atomic" "time" - "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" @@ -28,17 +27,29 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) +// Connection state constants. +const ( + connDisconnected int64 = iota + connConnected + connInitialized +) + var globalConnectionID uint64 = 1 var ( defaultMaxMessageSize uint32 = 48000000 - errResponseTooLarge error = errors.New("length of read message too large") + errResponseTooLarge = errors.New("length of read message too large") errLoadBalancedStateMismatch = errors.New("driver attempted to initialize in load balancing mode, but the server does not support this mode") ) func nextConnectionID() uint64 { return atomic.AddUint64(&globalConnectionID, 1) } type connection struct { + // state must be accessed using the atomic package and should be at the beginning of the struct. 
+ // - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG + // - suggested layout: https://go101.org/article/memory-layout.html + state int64 + id string nc net.Conn // When nil, the connection is closed. addr address.Address @@ -46,15 +57,12 @@ type connection struct { idleDeadline atomic.Value // Stores a time.Time readTimeout time.Duration writeTimeout time.Duration - descMu sync.RWMutex // Guards desc. TODO: Remove with or after GODRIVER-2038. desc description.Server - isMasterRTT time.Duration + helloRTT time.Duration compressor wiremessage.CompressorID zliblevel int zstdLevel int - connected int32 // must be accessed using the sync/atomic package connectDone chan struct{} - connectErr error config *connectionConfig cancelConnectContext context.CancelFunc connectContextMade chan struct{} @@ -62,21 +70,17 @@ type connection struct { currentlyStreaming bool connectContextMutex sync.Mutex cancellationListener cancellationListener + serverConnectionID *int32 // the server's ID for this client's connection // pool related fields - pool *pool - poolID uint64 - generation uint64 - expireReason string - poolMonitor *event.PoolMonitor + pool *pool + poolID uint64 + generation uint64 } // newConnection handles the creation of a connection. It does not connect the connection. -func newConnection(addr address.Address, opts ...ConnectionOption) (*connection, error) { - cfg, err := newConnectionConfig(opts...) - if err != nil { - return nil, err - } +func newConnection(addr address.Address, opts ...ConnectionOption) *connection { + cfg := newConnectionConfig(opts...) id := fmt.Sprintf("%s[-%d]", addr, nextConnectionID()) @@ -90,28 +94,15 @@ func newConnection(addr address.Address, opts ...ConnectionOption) (*connection, config: cfg, connectContextMade: make(chan struct{}), cancellationListener: internal.NewCancellationListener(), - poolMonitor: cfg.poolMonitor, } // Connections to non-load balanced deployments should eagerly set the generation numbers so errors encountered // at any point during connection establishment can be processed without the connection being considered stale. if !c.config.loadBalanced { c.setGenerationNumber() } - atomic.StoreInt32(&c.connected, initialized) - - return c, nil -} - -func (c *connection) processInitializationError(opCtx context.Context, err error) { - atomic.StoreInt32(&c.connected, disconnected) - if c.nc != nil { - _ = c.nc.Close() - } + atomic.StoreInt64(&c.state, connInitialized) - c.connectErr = ConnectionError{Wrapped: err, init: true} - if c.config.errorHandlingCallback != nil { - c.config.errorHandlingCallback(opCtx, c.connectErr, c.generation, c.desc.ServiceID) - } + return c } // setGenerationNumber sets the connection's generation number if a callback has been provided to do so in connection @@ -135,14 +126,28 @@ func (c *connection) hasGenerationNumber() bool { return c.desc.LoadBalanced() } -// connect handles the I/O for a connection. It will dial, configure TLS, and perform -// initialization handshakes. -func (c *connection) connect(ctx context.Context) { - if !atomic.CompareAndSwapInt32(&c.connected, initialized, connected) { - return +// connect handles the I/O for a connection. It will dial, configure TLS, and perform initialization +// handshakes. All errors returned by connect are considered "before the handshake completes" and +// must be handled by calling the appropriate SDAM handshake error handler. 
+func (c *connection) connect(ctx context.Context) (err error) { + if !atomic.CompareAndSwapInt64(&c.state, connInitialized, connConnected) { + return nil } + defer close(c.connectDone) + // If connect returns an error, set the connection status as disconnected and close the + // underlying net.Conn if it was created. + defer func() { + if err != nil { + atomic.StoreInt64(&c.state, connDisconnected) + + if c.nc != nil { + _ = c.nc.Close() + } + } + }() + // Create separate contexts for dialing a connection and doing the MongoDB/auth handshakes. // // handshakeCtx is simply a cancellable version of ctx because there's no default timeout that needs to be applied @@ -181,12 +186,9 @@ func (c *connection) connect(ctx context.Context) { close(c.connectContextMade) // Assign the result of DialContext to a temporary net.Conn to ensure that c.nc is not set in an error case. - var err error - var tempNc net.Conn - tempNc, err = c.config.dialer.DialContext(dialCtx, c.addr.Network(), c.addr.String()) + tempNc, err := c.config.dialer.DialContext(dialCtx, c.addr.Network(), c.addr.String()) if err != nil { - c.processInitializationError(ctx, err) - return + return ConnectionError{Wrapped: err, init: true} } c.nc = tempNc @@ -201,25 +203,15 @@ func (c *connection) connect(ctx context.Context) { } tlsNc, err := configureTLS(dialCtx, c.config.tlsConnectionSource, c.nc, c.addr, tlsConfig, ocspOpts) if err != nil { - c.processInitializationError(ctx, err) - return + return ConnectionError{Wrapped: err, init: true} } c.nc = tlsNc } - c.bumpIdleDeadline() - - // running isMaster and authentication is handled by a handshaker on the configuration instance. + // running hello and authentication is handled by a handshaker on the configuration instance. handshaker := c.config.handshaker if handshaker == nil { - if c.poolMonitor != nil { - c.poolMonitor.Event(&event.PoolEvent{ - Type: event.ConnectionReady, - Address: c.addr.String(), - ConnectionID: c.poolID, - }) - } - return + return nil } var handshakeInfo driver.HandshakeInformation @@ -229,10 +221,9 @@ func (c *connection) connect(ctx context.Context) { if err == nil { // We only need to retain the Description field as the connection's description. The authentication-related // fields in handshakeInfo are tracked by the handshaker if necessary. - c.descMu.Lock() c.desc = handshakeInfo.Description - c.descMu.Unlock() - c.isMasterRTT = time.Since(handshakeStartTime) + c.serverConnectionID = handshakeInfo.ServerConnectionID + c.helloRTT = time.Since(handshakeStartTime) // If the application has indicated that the cluster is load balanced, ensure the server has included serviceId // in its handshake response to signal that it knows it's behind an LB as well. 
@@ -255,8 +246,7 @@ func (c *connection) connect(ctx context.Context) { // We have a failed handshake here if err != nil { - c.processInitializationError(ctx, err) - return + return ConnectionError{Wrapped: err, init: true} } if len(c.desc.Compression) > 0 { @@ -287,20 +277,13 @@ func (c *connection) connect(ctx context.Context) { } } } - if c.poolMonitor != nil { - c.poolMonitor.Event(&event.PoolEvent{ - Type: event.ConnectionReady, - Address: c.addr.String(), - ConnectionID: c.poolID, - }) - } + return nil } -func (c *connection) wait() error { +func (c *connection) wait() { if c.connectDone != nil { <-c.connectDone } - return c.connectErr } func (c *connection) closeConnectContext() { @@ -345,7 +328,7 @@ func (c *connection) cancellationListenerCallback() { func (c *connection) writeWireMessage(ctx context.Context, wm []byte) error { var err error - if atomic.LoadInt32(&c.connected) != connected { + if atomic.LoadInt64(&c.state) != connConnected { return ConnectionError{ConnectionID: c.id, message: "connection is closed"} } select { @@ -379,7 +362,6 @@ func (c *connection) writeWireMessage(ctx context.Context, wm []byte) error { } } - c.bumpIdleDeadline() return nil } @@ -402,7 +384,7 @@ func (c *connection) write(ctx context.Context, wm []byte) (err error) { // readWireMessage reads a wiremessage from the connection. The dst parameter will be overwritten. func (c *connection) readWireMessage(ctx context.Context, dst []byte) ([]byte, error) { - if atomic.LoadInt32(&c.connected) != connected { + if atomic.LoadInt64(&c.state) != connConnected { return dst, ConnectionError{ConnectionID: c.id, message: "connection is closed"} } @@ -444,7 +426,6 @@ func (c *connection) readWireMessage(ctx context.Context, dst []byte) ([]byte, e } } - c.bumpIdleDeadline() return dst, nil } @@ -476,7 +457,7 @@ func (c *connection) read(ctx context.Context, dst []byte) (bytesRead []byte, er // read the length as an int32 size := (int32(sizeBuf[0])) | (int32(sizeBuf[1]) << 8) | (int32(sizeBuf[2]) << 16) | (int32(sizeBuf[3]) << 24) - // In the case of an isMaster response where MaxMessageSize has not yet been set, use the hard-coded + // In the case of a hello response where MaxMessageSize has not yet been set, use the hard-coded // defaultMaxMessageSize instead. maxMessageSize := c.desc.MaxMessageSize if maxMessageSize == 0 { @@ -505,7 +486,7 @@ func (c *connection) read(ctx context.Context, dst []byte) (bytesRead []byte, er func (c *connection) close() error { // Overwrite the connection state as the first step so only the first close call will execute. - if !atomic.CompareAndSwapInt32(&c.connected, connected, disconnected) { + if !atomic.CompareAndSwapInt64(&c.state, connConnected, connDisconnected) { return nil } @@ -518,7 +499,7 @@ func (c *connection) close() error { } func (c *connection) closed() bool { - return atomic.LoadInt32(&c.connected) == disconnected + return atomic.LoadInt64(&c.state) == connDisconnected } func (c *connection) idleTimeoutExpired() bool { @@ -564,6 +545,10 @@ func (c *connection) ID() string { return c.id } +func (c *connection) ServerConnectionID() *int32 { + return c.serverConnectionID +} + // initConnection is an adapter used during connection initialization. It has the minimum // functionality necessary to implement the driver.Connection interface, which is required to pass a // *connection to a Handshaker. 
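The connection changes above replace the separate connected flag and connectErr field with a single atomically-accessed state value (connInitialized, connConnected, connDisconnected) and make connect() return its own error. The following is a minimal standalone sketch of that compare-and-swap lifecycle, using illustrative names rather than the driver's identifiers.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// Illustrative connection states; the names mirror the pattern in the diff
// but are not the driver's constants.
const (
	stateDisconnected int64 = iota
	stateConnected
	stateInitialized
)

// conn keeps the atomically-accessed state field first in the struct so it is
// 64-bit aligned on 32-bit platforms (see the sync/atomic BUG note).
type conn struct {
	state int64
}

func newConn() *conn {
	c := &conn{}
	atomic.StoreInt64(&c.state, stateInitialized)
	return c
}

// connect transitions initialized -> connected exactly once; on error it rolls
// the state back to disconnected, as the rewritten connect() does.
func (c *conn) connect(fail bool) (err error) {
	if !atomic.CompareAndSwapInt64(&c.state, stateInitialized, stateConnected) {
		return nil // already connected or closed, nothing to do
	}
	defer func() {
		if err != nil {
			atomic.StoreInt64(&c.state, stateDisconnected)
		}
	}()
	if fail {
		return errors.New("handshake failed")
	}
	return nil
}

func (c *conn) closed() bool {
	return atomic.LoadInt64(&c.state) == stateDisconnected
}

func main() {
	c := newConn()
	fmt.Println(c.connect(false), c.closed()) // <nil> false
	c2 := newConn()
	fmt.Println(c2.connect(true), c2.closed()) // handshake failed true
}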
@@ -611,6 +596,10 @@ type Connection struct { refCount int cleanupPoolFn func() + // cleanupServerFn resets the server state when a connection is returned to the connection pool + // via Close() or expired via Expire(). + cleanupServerFn func() + mu sync.RWMutex } @@ -707,11 +696,15 @@ func (c *Connection) Expire() error { } func (c *Connection) cleanupReferences() error { - err := c.pool.put(c.connection) + err := c.pool.checkIn(c.connection) if c.cleanupPoolFn != nil { c.cleanupPoolFn() c.cleanupPoolFn = nil } + if c.cleanupServerFn != nil { + c.cleanupServerFn() + c.cleanupServerFn = nil + } c.connection = nil return err } @@ -810,9 +803,6 @@ func (c *Connection) unpin(reason string) error { return nil } -var notMasterCodes = []int32{10107, 13435} -var recoveringCodes = []int32{11600, 11602, 13436, 189, 91} - func configureTLS(ctx context.Context, tlsConnSource tlsConnectionSource, nc net.Conn, @@ -820,7 +810,6 @@ func configureTLS(ctx context.Context, config *tls.Config, ocspOpts *ocsp.VerifyOptions, ) (net.Conn, error) { - // Ensure config.ServerName is always set for SNI. if config.ServerName == "" { hostname := addr.String() @@ -834,27 +823,15 @@ func configureTLS(ctx context.Context, } client := tlsConnSource.Client(nc, config) - errChan := make(chan error, 1) - go func() { - errChan <- client.Handshake() - }() - - select { - case err := <-errChan: - if err != nil { - return nil, err - } - - // Only do OCSP verification if TLS verification is requested. - if config.InsecureSkipVerify { - break - } + if err := clientHandshake(ctx, client); err != nil { + return nil, err + } + // Only do OCSP verification if TLS verification is requested. + if !config.InsecureSkipVerify { if ocspErr := ocsp.Verify(ctx, client.ConnectionState(), ocspOpts); ocspErr != nil { return nil, ocspErr } - case <-ctx.Done(): - return nil, ctx.Err() } return client, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy_command_metadata.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy_command_metadata.go deleted file mode 100644 index dc333efb3..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy_command_metadata.go +++ /dev/null @@ -1,28 +0,0 @@ -package topology - -import "time" - -// commandMetadata contains metadata about a command sent to the server. -type commandMetadata struct { - Name string - Time time.Time - Legacy bool - FullCollectionName string -} - -// createMetadata creates metadata for a command. -func createMetadata(name string, legacy bool, fullCollName string) *commandMetadata { - return &commandMetadata{ - Name: name, - Time: time.Now(), - Legacy: legacy, - FullCollectionName: fullCollName, - } -} - -// TimeDifference returns the difference between now and the time a command was sent in nanoseconds. 
-func (cm *commandMetadata) TimeDifference() int64 { - t := time.Now() - duration := t.Sub(cm.Time) - return duration.Nanoseconds() -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go index b9dac8c40..24507aa1c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go @@ -40,13 +40,11 @@ type Handshaker = driver.Handshaker type generationNumberFn func(serviceID *primitive.ObjectID) uint64 type connectionConfig struct { - appName string connectTimeout time.Duration dialer Dialer handshaker Handshaker idleTimeout time.Duration cmdMonitor *event.CommandMonitor - poolMonitor *event.PoolMonitor readTimeout time.Duration writeTimeout time.Duration tlsConfig *tls.Config @@ -55,13 +53,12 @@ type connectionConfig struct { zstdLevel *int ocspCache ocsp.Cache disableOCSPEndpointCheck bool - errorHandlingCallback func(opCtx context.Context, err error, startGenNum uint64, svcID *primitive.ObjectID) tlsConnectionSource tlsConnectionSource loadBalanced bool getGenerationFn generationNumberFn } -func newConnectionConfig(opts ...ConnectionOption) (*connectionConfig, error) { +func newConnectionConfig(opts ...ConnectionOption) *connectionConfig { cfg := &connectionConfig{ connectTimeout: 30 * time.Second, dialer: nil, @@ -69,139 +66,111 @@ func newConnectionConfig(opts ...ConnectionOption) (*connectionConfig, error) { } for _, opt := range opts { - err := opt(cfg) - if err != nil { - return nil, err + if opt == nil { + continue } + opt(cfg) } if cfg.dialer == nil { cfg.dialer = &net.Dialer{} } - return cfg, nil + return cfg } // ConnectionOption is used to configure a connection. -type ConnectionOption func(*connectionConfig) error +type ConnectionOption func(*connectionConfig) func withTLSConnectionSource(fn func(tlsConnectionSource) tlsConnectionSource) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.tlsConnectionSource = fn(c.tlsConnectionSource) - return nil - } -} - -func withErrorHandlingCallback(fn func(opCtx context.Context, err error, startGenNum uint64, svcID *primitive.ObjectID)) ConnectionOption { - return func(c *connectionConfig) error { - c.errorHandlingCallback = fn - return nil } } // WithCompressors sets the compressors that can be used for communication. func WithCompressors(fn func([]string) []string) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.compressors = fn(c.compressors) - return nil } } // WithConnectTimeout configures the maximum amount of time a dial will wait for a // Connect to complete. The default is 30 seconds. func WithConnectTimeout(fn func(time.Duration) time.Duration) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.connectTimeout = fn(c.connectTimeout) - return nil } } // WithDialer configures the Dialer to use when making a new connection to MongoDB. func WithDialer(fn func(Dialer) Dialer) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.dialer = fn(c.dialer) - return nil } } // WithHandshaker configures the Handshaker that wll be used to initialize newly // dialed connections. 
func WithHandshaker(fn func(Handshaker) Handshaker) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.handshaker = fn(c.handshaker) - return nil } } // WithIdleTimeout configures the maximum idle time to allow for a connection. func WithIdleTimeout(fn func(time.Duration) time.Duration) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.idleTimeout = fn(c.idleTimeout) - return nil } } // WithReadTimeout configures the maximum read time for a connection. func WithReadTimeout(fn func(time.Duration) time.Duration) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.readTimeout = fn(c.readTimeout) - return nil } } // WithWriteTimeout configures the maximum write time for a connection. func WithWriteTimeout(fn func(time.Duration) time.Duration) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.writeTimeout = fn(c.writeTimeout) - return nil } } // WithTLSConfig configures the TLS options for a connection. func WithTLSConfig(fn func(*tls.Config) *tls.Config) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.tlsConfig = fn(c.tlsConfig) - return nil } } // WithMonitor configures a event for command monitoring. func WithMonitor(fn func(*event.CommandMonitor) *event.CommandMonitor) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.cmdMonitor = fn(c.cmdMonitor) - return nil - } -} - -// withPoolMonitor configures a event for connection monitoring. -func withPoolMonitor(fn func(*event.PoolMonitor) *event.PoolMonitor) ConnectionOption { - return func(c *connectionConfig) error { - c.poolMonitor = fn(c.poolMonitor) - return nil } } // WithZlibLevel sets the zLib compression level. func WithZlibLevel(fn func(*int) *int) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.zlibLevel = fn(c.zlibLevel) - return nil } } // WithZstdLevel sets the zstd compression level. func WithZstdLevel(fn func(*int) *int) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.zstdLevel = fn(c.zstdLevel) - return nil } } // WithOCSPCache specifies a cache to use for OCSP verification. func WithOCSPCache(fn func(ocsp.Cache) ocsp.Cache) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.ocspCache = fn(c.ocspCache) - return nil } } @@ -209,23 +178,20 @@ func WithOCSPCache(fn func(ocsp.Cache) ocsp.Cache) ConnectionOption { // to true, the driver will only check stapled responses and will continue the connection without reaching out to // OCSP responders. func WithDisableOCSPEndpointCheck(fn func(bool) bool) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.disableOCSPEndpointCheck = fn(c.disableOCSPEndpointCheck) - return nil } } // WithConnectionLoadBalanced specifies whether or not the connection is to a server behind a load balancer. 
func WithConnectionLoadBalanced(fn func(bool) bool) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.loadBalanced = fn(c.loadBalanced) - return nil } } func withGenerationNumberFn(fn func(generationNumberFn) generationNumberFn) ConnectionOption { - return func(c *connectionConfig) error { + return func(c *connectionConfig) { c.getGenerationFn = fn(c.getGenerationFn) - return nil } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go index f3e2e3a7c..d3192cbb1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go @@ -11,7 +11,7 @@ type ConnectionError struct { ConnectionID string Wrapped error - // init will be set to true if this error occured during connection initialization or + // init will be set to true if this error occurred during connection initialization or // during a connection handshake. init bool message string @@ -21,7 +21,7 @@ type ConnectionError struct { func (e ConnectionError) Error() string { message := e.message if e.init { - fullMsg := "error occured during connection handshake" + fullMsg := "error occurred during connection handshake" if message != "" { fullMsg = fmt.Sprintf("%s: %s", fullMsg, message) } @@ -66,6 +66,7 @@ type WaitQueueTimeoutError struct { PinnedCursorConnections uint64 PinnedTransactionConnections uint64 maxPoolSize uint64 + totalConnectionCount int } // Error implements the error interface. @@ -75,10 +76,14 @@ func (w WaitQueueTimeoutError) Error() string { errorMsg = fmt.Sprintf("%s: %s", errorMsg, w.Wrapped.Error()) } - errorMsg = fmt.Sprintf("%s; maxPoolSize: %d, connections in use by cursors: %d, connections in use by transactions: %d", - errorMsg, w.maxPoolSize, w.PinnedCursorConnections, w.PinnedTransactionConnections) - return fmt.Sprintf("%s, connections in use by other operations: %d", errorMsg, - w.maxPoolSize-(w.PinnedCursorConnections+w.PinnedTransactionConnections)) + return fmt.Sprintf( + "%s; maxPoolSize: %d, connections in use by cursors: %d"+ + ", connections in use by transactions: %d, connections in use by other operations: %d", + errorMsg, + w.maxPoolSize, + w.PinnedCursorConnections, + w.PinnedTransactionConnections, + uint64(w.totalConnectionCount)-w.PinnedCursorConnections-w.PinnedTransactionConnections) } // Unwrap returns the underlying error. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go index 7a7f7b3cf..46bcd19a3 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go @@ -18,7 +18,7 @@ import ( var ( // SupportedWireVersions is the range of wire versions supported by the driver. - SupportedWireVersions = description.NewVersionRange(2, 13) + SupportedWireVersions = description.NewVersionRange(2, 15) ) const ( @@ -46,7 +46,7 @@ func newFSM() *fsm { // // apply should operation on immutable descriptions so we don't have to lock for the entire time we're applying the // server description. 
-func (f *fsm) apply(s description.Server) (description.Topology, description.Server, error) { +func (f *fsm) apply(s description.Server) (description.Topology, description.Server) { newServers := make([]description.Server, len(f.Servers)) copy(newServers, f.Servers) @@ -77,7 +77,7 @@ func (f *fsm) apply(s description.Server) (description.Topology, description.Ser } if _, ok := f.findServer(s.Addr); !ok { - return f.Topology, s, nil + return f.Topology, s } updatedDesc := s @@ -107,7 +107,7 @@ func (f *fsm) apply(s description.Server) (description.Topology, description.Ser MinSupportedMongoDBVersion, ) f.Topology.CompatibilityErr = f.compatibilityErr - return f.Topology, s, nil + return f.Topology, s } if server.WireVersion.Min > SupportedWireVersions.Max { @@ -119,14 +119,14 @@ func (f *fsm) apply(s description.Server) (description.Topology, description.Ser SupportedWireVersions.Max, ) f.Topology.CompatibilityErr = f.compatibilityErr - return f.Topology, s, nil + return f.Topology, s } } } f.compatible.Store(true) f.compatibilityErr = nil - return f.Topology, updatedDesc, nil + return f.Topology, updatedDesc } func (f *fsm) applyToReplicaSetNoPrimary(s description.Server) description.Server { @@ -185,7 +185,7 @@ func (f *fsm) applyToSingle(s description.Server) description.Server { f.replaceServer(s) case description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost: // A replica set name can be provided when creating a direct connection. In this case, if the set name returned - // by the isMaster response doesn't match up with the one provided during configuration, the server description + // by the hello response doesn't match up with the one provided during configuration, the server description // is replaced with a default Unknown description. // // We create a new server description rather than doing s.Kind = description.Unknown because the other fields, @@ -239,7 +239,7 @@ func (f *fsm) updateRSFromPrimary(s description.Server) { return } - if s.SetVersion != 0 && !bytes.Equal(s.ElectionID[:], primitive.NilObjectID[:]) { + if s.SetVersion != 0 && !s.ElectionID.IsZero() { if f.maxSetVersion > s.SetVersion || bytes.Compare(f.maxElectionID[:], s.ElectionID[:]) == 1 { f.replaceServer(description.Server{ Addr: s.Addr, @@ -376,12 +376,10 @@ func (f *fsm) removeServerByAddr(addr address.Address) { } } -func (f *fsm) replaceServer(s description.Server) bool { +func (f *fsm) replaceServer(s description.Server) { if i, ok := f.findServer(s.Addr); ok { f.setServer(i, s) - return true } - return false } func (f *fsm) setServer(i int, s description.Server) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_16.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_16.go new file mode 100644 index 000000000..b4a690c4d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_16.go @@ -0,0 +1,37 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +//go:build !go1.17 +// +build !go1.17 + +package topology + +import ( + "crypto/tls" + "time" +) + +// hangingTLSConn is an implementation of tlsConn that wraps the tls.Conn type and overrides the Handshake function to +// sleep for a fixed amount of time. 
+type hangingTLSConn struct { + *tls.Conn + sleepTime time.Duration +} + +var _ tlsConn = (*hangingTLSConn)(nil) + +func newHangingTLSConn(conn *tls.Conn, sleepTime time.Duration) *hangingTLSConn { + return &hangingTLSConn{ + Conn: conn, + sleepTime: sleepTime, + } +} + +// Handshake implements the tlsConn interface on Go 1.16 and less. +func (h *hangingTLSConn) Handshake() error { + time.Sleep(h.sleepTime) + return h.Conn.Handshake() +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_17.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_17.go new file mode 100644 index 000000000..1058b86c5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_17.go @@ -0,0 +1,44 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +//go:build go1.17 +// +build go1.17 + +package topology + +import ( + "context" + "crypto/tls" + "time" +) + +// hangingTLSConn is an implementation of tlsConn that wraps the tls.Conn type and overrides the HandshakeContext function to +// sleep for a fixed amount of time. +type hangingTLSConn struct { + *tls.Conn + sleepTime time.Duration +} + +var _ tlsConn = (*hangingTLSConn)(nil) + +func newHangingTLSConn(conn *tls.Conn, sleepTime time.Duration) *hangingTLSConn { + return &hangingTLSConn{ + Conn: conn, + sleepTime: sleepTime, + } +} + +// HandshakeContext implements the tlsConn interface on Go 1.17 and higher. +func (h *hangingTLSConn) HandshakeContext(ctx context.Context) error { + timer := time.NewTimer(h.sleepTime) + defer timer.Stop() + select { + case <-timer.C: + case <-ctx.Done(): + } + + return h.Conn.HandshakeContext(ctx) +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go index 16a3c31d0..f63ed7967 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go @@ -8,7 +8,7 @@ package topology import ( "context" - "math" + "fmt" "sync" "sync/atomic" "time" @@ -16,15 +16,22 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/address" - "golang.org/x/sync/semaphore" + "go.mongodb.org/mongo-driver/x/mongo/driver" ) -// ErrPoolConnected is returned from an attempt to connect an already connected pool -var ErrPoolConnected = PoolError("attempted to Connect to an already connected pool") +// Connection pool state constants. +const ( + poolPaused int = iota + poolReady + poolClosed +) + +// ErrPoolNotPaused is returned when attempting to mark a connection pool "ready" that is not +// currently "paused". +var ErrPoolNotPaused = PoolError("only a paused pool can be marked ready") -// ErrPoolDisconnected is returned from an attempt to Close an already disconnected -// or disconnecting pool. -var ErrPoolDisconnected = PoolError("attempted to check out a connection from closed connection pool") +// ErrPoolClosed is returned when attempting to check out a connection from a closed pool. +var ErrPoolClosed = PoolError("attempted to check out a connection from closed connection pool") // ErrConnectionClosed is returned from an attempt to use an already closed connection. 
var ErrConnectionClosed = ConnectionError{ConnectionID: "", message: "connection is closed"} @@ -35,293 +42,312 @@ var ErrWrongPool = PoolError("connection does not belong to this pool") // PoolError is an error returned from a Pool method. type PoolError string -// maintainInterval is the interval at which the background routine to close stale connections will be run. -var maintainInterval = time.Minute - func (pe PoolError) Error() string { return string(pe) } -// poolConfig contains all aspects of the pool that can be configured -type poolConfig struct { - Address address.Address - MinPoolSize uint64 - MaxPoolSize uint64 // MaxPoolSize is not used because handling the max number of connections in the pool is handled in server. This is only used for command monitoring - MaxIdleTime time.Duration - PoolMonitor *event.PoolMonitor +// poolClearedError is an error returned when the connection pool is cleared or currently paused. It +// is a retryable error. +type poolClearedError struct { + err error + address address.Address +} + +func (pce poolClearedError) Error() string { + return fmt.Sprintf( + "connection pool for %v was cleared because another operation failed with: %v", + pce.address, + pce.err) } -// checkOutResult is all the values that can be returned from a checkOut -type checkOutResult struct { - c *connection - err error - reason string +// Retryable returns true. All poolClearedErrors are retryable. +func (poolClearedError) Retryable() bool { return true } + +// Assert that poolClearedError is a driver.RetryablePoolError. +var _ driver.RetryablePoolError = poolClearedError{} + +// poolConfig contains all aspects of the pool that can be configured +type poolConfig struct { + Address address.Address + MinPoolSize uint64 + MaxPoolSize uint64 + MaxConnecting uint64 + MaxIdleTime time.Duration + MaintainInterval time.Duration + PoolMonitor *event.PoolMonitor + handshakeErrFn func(error, uint64, *primitive.ObjectID) } -// pool is a wrapper of resource pool that follows the CMAP spec for connection pools type pool struct { - address address.Address - opts []ConnectionOption - conns *resourcePool // pool for non-checked out connections - generation *poolGenerationMap - monitor *event.PoolMonitor + // The following integer fields must be accessed using the atomic package + // and should be at the beginning of the struct. + // - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG + // - suggested layout: https://go101.org/article/memory-layout.html - // Must be accessed using the atomic package. - connected int32 + nextID uint64 // nextID is the next pool ID for a new connection. pinnedCursorConnections uint64 pinnedTransactionConnections uint64 - nextid uint64 - opened map[uint64]*connection // opened holds all of the currently open connections. - sem *semaphore.Weighted - sync.Mutex + address address.Address + minSize uint64 + maxSize uint64 + maxConnecting uint64 + monitor *event.PoolMonitor + + // handshakeErrFn is used to handle any errors that happen during connection establishment and + // handshaking. + handshakeErrFn func(error, uint64, *primitive.ObjectID) + + connOpts []ConnectionOption + generation *poolGenerationMap + + maintainInterval time.Duration // maintainInterval is the maintain() loop interval. + maintainReady chan struct{} // maintainReady is a signal channel that starts the maintain() loop when ready() is called. + backgroundDone *sync.WaitGroup // backgroundDone waits for all background goroutines to return. 
+ + stateMu sync.RWMutex // stateMu guards state, lastClearErr + state int // state is the current state of the connection pool. + lastClearErr error // lastClearErr is the last error that caused the pool to be cleared. + + // createConnectionsCond is the condition variable that controls when the createConnections() + // loop runs or waits. Its lock guards cancelBackgroundCtx, conns, and newConnWait. Any changes + // to the state of the guarded values must be made while holding the lock to prevent undefined + // behavior in the createConnections() waiting logic. + createConnectionsCond *sync.Cond + cancelBackgroundCtx context.CancelFunc // cancelBackgroundCtx is called to signal background goroutines to stop. + conns map[uint64]*connection // conns holds all currently open connections. + newConnWait wantConnQueue // newConnWait holds all wantConn requests for new connections. + + idleMu sync.Mutex // idleMu guards idleConns, idleConnWait + idleConns []*connection // idleConns holds all idle connections. + idleConnWait wantConnQueue // idleConnWait holds all wantConn requests for idle connections. } -// connectionExpiredFunc checks if a given connection is stale and should be removed from the resource pool -func connectionExpiredFunc(v interface{}) bool { - if v == nil { - return true - } +// getState returns the current state of the pool. Callers must not hold the stateMu lock. +func (p *pool) getState() int { + p.stateMu.RLock() + defer p.stateMu.RUnlock() - c, ok := v.(*connection) - if !ok { - return true - } + return p.state +} +// connectionPerished checks if a given connection is perished and should be removed from the pool. +func connectionPerished(conn *connection) (string, bool) { switch { - case atomic.LoadInt32(&c.pool.connected) != connected: - c.expireReason = event.ReasonPoolClosed - case c.closed(): + case conn.closed(): // A connection would only be closed if it encountered a network error during an operation and closed itself. - c.expireReason = event.ReasonConnectionErrored - case c.idleTimeoutExpired(): - c.expireReason = event.ReasonIdle - case c.pool.stale(c): - c.expireReason = event.ReasonStale - default: - return false + return event.ReasonError, true + case conn.idleTimeoutExpired(): + return event.ReasonIdle, true + case conn.pool.stale(conn): + return event.ReasonStale, true } - - return true + return "", false } -// connectionCloseFunc closes a given connection. If ctx is nil, the closing will occur in the background -func connectionCloseFunc(v interface{}) { - c, ok := v.(*connection) - if !ok || v == nil { - return +// newPool creates a new pool. It will use the provided options when creating connections. +func newPool(config poolConfig, connOpts ...ConnectionOption) *pool { + if config.MaxIdleTime != time.Duration(0) { + connOpts = append(connOpts, WithIdleTimeout(func(_ time.Duration) time.Duration { return config.MaxIdleTime })) } - // The resource pool will only close connections if they're expired or the pool is being disconnected and - // resourcePool.Close() is called. For the former case, c.expireReason will be set. In the latter, it will not, so - // we use ReasonPoolClosed. 
- reason := c.expireReason - if c.expireReason == "" { - reason = event.ReasonPoolClosed + var maxConnecting uint64 = 2 + if config.MaxConnecting > 0 { + maxConnecting = config.MaxConnecting } - _ = c.pool.removeConnection(c, reason) - go func() { - _ = c.pool.closeConnection(c) - }() -} + maintainInterval := 10 * time.Second + if config.MaintainInterval != 0 { + maintainInterval = config.MaintainInterval + } -// connectionInitFunc returns an init function for the resource pool that will make new connections for this pool -func (p *pool) connectionInitFunc() interface{} { - c, _, err := p.makeNewConnection() - if err != nil { - return nil + pool := &pool{ + address: config.Address, + minSize: config.MinPoolSize, + maxSize: config.MaxPoolSize, + maxConnecting: maxConnecting, + monitor: config.PoolMonitor, + handshakeErrFn: config.handshakeErrFn, + connOpts: connOpts, + generation: newPoolGenerationMap(), + state: poolPaused, + maintainInterval: maintainInterval, + maintainReady: make(chan struct{}, 1), + backgroundDone: &sync.WaitGroup{}, + createConnectionsCond: sync.NewCond(&sync.Mutex{}), + conns: make(map[uint64]*connection, config.MaxPoolSize), + idleConns: make([]*connection, 0, config.MaxPoolSize), } + pool.connOpts = append(pool.connOpts, withGenerationNumberFn(func(_ generationNumberFn) generationNumberFn { return pool.getGenerationForNewConnection })) - go c.connect(context.Background()) + pool.generation.connect() - return c -} + // Create a Context with cancellation that's used to signal the createConnections() and + // maintain() background goroutines to stop. Also create a "backgroundDone" WaitGroup that is + // used to wait for the background goroutines to return. + var ctx context.Context + ctx, pool.cancelBackgroundCtx = context.WithCancel(context.Background()) -// newPool creates a new pool that will hold size number of idle connections. It will use the -// provided options when creating connections. -func newPool(config poolConfig, connOpts ...ConnectionOption) (*pool, error) { - opts := connOpts - if config.MaxIdleTime != time.Duration(0) { - opts = append(opts, WithIdleTimeout(func(_ time.Duration) time.Duration { return config.MaxIdleTime })) - } - if config.PoolMonitor != nil { - opts = append(opts, withPoolMonitor(func(_ *event.PoolMonitor) *event.PoolMonitor { return config.PoolMonitor })) - } - - var maxConns = config.MaxPoolSize - if maxConns == 0 { - maxConns = math.MaxInt64 + for i := 0; i < int(pool.maxConnecting); i++ { + pool.backgroundDone.Add(1) + go pool.createConnections(ctx, pool.backgroundDone) } - pool := &pool{ - address: config.Address, - monitor: config.PoolMonitor, - connected: disconnected, - opened: make(map[uint64]*connection), - opts: opts, - sem: semaphore.NewWeighted(int64(maxConns)), - generation: newPoolGenerationMap(), - } - pool.opts = append(pool.opts, withGenerationNumberFn(func(_ generationNumberFn) generationNumberFn { return pool.getGenerationForNewConnection })) - - // we do not pass in config.MaxPoolSize because we manage the max size at this level rather than the resource pool level - rpc := resourcePoolConfig{ - MaxSize: maxConns, - MinSize: config.MinPoolSize, - MaintainInterval: maintainInterval, - ExpiredFn: connectionExpiredFunc, - CloseFn: connectionCloseFunc, - InitFn: pool.connectionInitFunc, + // If maintainInterval is not positive, don't start the maintain() goroutine. Expect that + // negative values are only used in testing; this config value is not user-configurable. 
+ if maintainInterval > 0 { + pool.backgroundDone.Add(1) + go pool.maintain(ctx, pool.backgroundDone) } if pool.monitor != nil { pool.monitor.Event(&event.PoolEvent{ Type: event.PoolCreated, PoolOptions: &event.MonitorPoolOptions{ - MaxPoolSize: rpc.MaxSize, - MinPoolSize: rpc.MinSize, - WaitQueueTimeoutMS: uint64(config.MaxIdleTime) / uint64(time.Millisecond), + MaxPoolSize: config.MaxPoolSize, + MinPoolSize: config.MinPoolSize, }, Address: pool.address.String(), }) } - rp, err := newResourcePool(rpc) - if err != nil { - return nil, err - } - pool.conns = rp - - return pool, nil + return pool } // stale checks if a given connection's generation is below the generation of the pool -func (p *pool) stale(c *connection) bool { - if c == nil { - return true +func (p *pool) stale(conn *connection) bool { + return conn == nil || p.generation.stale(conn.desc.ServiceID, conn.generation) +} + +// ready puts the pool into the "ready" state and starts the background connection creation and +// monitoring goroutines. ready must be called before connections can be checked out. An unused, +// connected pool must be closed or it will leak goroutines and will not be garbage collected. +func (p *pool) ready() error { + // While holding the stateMu lock, set the pool to "ready" if it is currently "paused". + p.stateMu.Lock() + if p.state == poolReady { + p.stateMu.Unlock() + return nil + } + if p.state != poolPaused { + p.stateMu.Unlock() + return ErrPoolNotPaused } + p.lastClearErr = nil + p.state = poolReady + p.stateMu.Unlock() - c.descMu.RLock() - serviceID := c.desc.ServiceID - c.descMu.RUnlock() - return p.generation.stale(serviceID, c.generation) -} + // Signal maintain() to wake up immediately when marking the pool "ready". + select { + case p.maintainReady <- struct{}{}: + default: + } -// connect puts the pool into the connected state, allowing it to be used and will allow items to begin being processed from the wait queue -func (p *pool) connect() error { - if !atomic.CompareAndSwapInt32(&p.connected, disconnected, connected) { - return ErrPoolConnected + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.PoolReady, + Address: p.address.String(), + }) } - p.generation.connect() - p.conns.initialize() + return nil } -// disconnect disconnects the pool and closes all connections including those both in and out of the pool -func (p *pool) disconnect(ctx context.Context) error { - if !atomic.CompareAndSwapInt32(&p.connected, connected, disconnecting) { - return ErrPoolDisconnected +// close closes the pool, closes all connections associated with the pool, and stops all background +// goroutines. All subsequent checkOut requests will return an error. An unused, ready pool must be +// closed or it will leak goroutines and will not be garbage collected. +func (p *pool) close(ctx context.Context) { + p.stateMu.Lock() + if p.state == poolClosed { + p.stateMu.Unlock() + return } + p.state = poolClosed + p.stateMu.Unlock() + + // Call cancelBackgroundCtx() to exit the maintain() and createConnections() background + // goroutines. Broadcast to the createConnectionsCond to wake up all createConnections() + // goroutines. We must hold the createConnectionsCond lock here because we're changing the + // condition by cancelling the "background goroutine" Context, even tho cancelling the Context + // is also synchronized by a lock. Otherwise, we run into an intermittent bug that prevents the + // createConnections() goroutines from exiting. 
+ p.createConnectionsCond.L.Lock() + p.cancelBackgroundCtx() + p.createConnectionsCond.Broadcast() + p.createConnectionsCond.L.Unlock() + + // Wait for all background goroutines to exit. + p.backgroundDone.Wait() + + p.generation.disconnect() if ctx == nil { ctx = context.Background() } - p.conns.Close() - p.generation.disconnect() - - var err error - if dl, ok := ctx.Deadline(); ok { - // If we have a deadline then we interpret it as a request to gracefully shutdown. We wait - // until either all the connections have landed back in the pool (and have been closed) or - // until the timer is done. - ticker := time.NewTicker(1 * time.Second) + // If we have a deadline then we interpret it as a request to gracefully shutdown. We wait until + // either all the connections have been checked back into the pool (i.e. total open connections + // equals idle connections) or until the Context deadline is reached. + if _, ok := ctx.Deadline(); ok { + ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() - timer := time.NewTimer(time.Now().Sub(dl)) - defer timer.Stop() + + graceful: for { + if p.totalConnectionCount() == p.availableConnectionCount() { + break graceful + } + select { - case <-timer.C: + case <-ticker.C: case <-ctx.Done(): - case <-ticker.C: // Can we replace this with an actual signal channel? We will know when p.inflight hits zero from the close method. - p.Lock() - if len(p.opened) > 0 { - p.Unlock() - continue - } - p.Unlock() + break graceful + default: } - break } } - // We copy the remaining connections into a slice, then iterate it to close them. This allows us - // to use a single function to actually clean up and close connections at the expense of a - // double iteration in the worse case. - p.Lock() - toClose := make([]*connection, 0, len(p.opened)) - for _, pc := range p.opened { - toClose = append(toClose, pc) + // Empty the idle connections stack and try to deliver ErrPoolClosed to any waiting wantConns + // from idleConnWait while holding the idleMu lock. + p.idleMu.Lock() + p.idleConns = p.idleConns[:0] + for { + w := p.idleConnWait.popFront() + if w == nil { + break + } + w.tryDeliver(nil, ErrPoolClosed) } - p.Unlock() - for _, pc := range toClose { - _ = p.removeConnection(pc, event.ReasonPoolClosed) - _ = p.closeConnection(pc) // We don't care about errors while closing the connection. + p.idleMu.Unlock() + + // Collect all conns from the pool and try to deliver ErrPoolClosed to any waiting wantConns + // from newConnWait while holding the createConnectionsCond lock. We can't call removeConnection + // on the connections while holding any locks, so do that after we release the lock. + p.createConnectionsCond.L.Lock() + conns := make([]*connection, 0, len(p.conns)) + for _, conn := range p.conns { + conns = append(conns, conn) } - atomic.StoreInt32(&p.connected, disconnected) - p.conns.clearTotal() - - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.PoolClosedEvent, - Address: p.address.String(), - }) + for { + w := p.newConnWait.popFront() + if w == nil { + break + } + w.tryDeliver(nil, ErrPoolClosed) } + p.createConnectionsCond.L.Unlock() - return err -} - -// makeNewConnection creates a new connection instance and emits a ConnectionCreatedEvent. The caller must call -// connection.connect on the returned instance before using it for operations. This function ensures that a -// ConnectionClosed event is published if there is an error after the ConnectionCreated event has been published. 
The -// caller must not hold the pool lock when calling this function. -func (p *pool) makeNewConnection() (*connection, string, error) { - c, err := newConnection(p.address, p.opts...) - if err != nil { - return nil, event.ReasonConnectionErrored, err + // Now that we're not holding any locks, remove all of the connections we collected from the + // pool. + for _, conn := range conns { + _ = p.removeConnection(conn, event.ReasonPoolClosed) + _ = p.closeConnection(conn) // We don't care about errors while closing the connection. } - c.pool = p - c.poolID = atomic.AddUint64(&p.nextid, 1) - if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.ConnectionCreated, - Address: p.address.String(), - ConnectionID: c.poolID, + Type: event.PoolClosedEvent, + Address: p.address.String(), }) } - - if atomic.LoadInt32(&p.connected) != connected { - // Manually publish a ConnectionClosed event here because the connection reference hasn't been stored and we - // need to ensure each ConnectionCreated event has a corresponding ConnectionClosed event. - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.ConnectionClosed, - Address: p.address.String(), - ConnectionID: c.poolID, - Reason: event.ReasonPoolClosed, - }) - } - _ = p.closeConnection(c) // The pool is disconnected or disconnecting, ignore the error from closing the connection. - return nil, event.ReasonPoolClosed, ErrPoolDisconnected - } - - p.Lock() - p.opened[c.poolID] = c - p.Unlock() - - return c, "", nil - } func (p *pool) pinConnectionToCursor() { @@ -342,13 +368,27 @@ func (p *pool) unpinConnectionFromTransaction() { atomic.AddUint64(&p.pinnedTransactionConnections, ^uint64(0)) } -// Checkout returns a connection from the pool -func (p *pool) get(ctx context.Context) (*connection, error) { - if ctx == nil { - ctx = context.Background() +// checkOut checks out a connection from the pool. If an idle connection is not available, the +// checkOut enters a queue waiting for either the next idle or new connection. If the pool is not +// ready, checkOut returns an error. +// Based partially on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1324 +func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.GetStarted, + Address: p.address.String(), + }) } - if atomic.LoadInt32(&p.connected) != connected { + // Check the pool state while holding a stateMu read lock. If the pool state is not "ready", + // return an error. Do all of this while holding the stateMu read lock to prevent a state change between + // checking the state and entering the wait queue. Not holding the stateMu read lock here may + // allow a checkOut() to enter the wait queue after clear() pauses the pool and clears the wait + // queue, resulting in createConnections() doing work while the pool is "paused". 
+ p.stateMu.RLock() + switch p.state { + case poolClosed: + p.stateMu.RUnlock() if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, @@ -356,163 +396,123 @@ func (p *pool) get(ctx context.Context) (*connection, error) { Reason: event.ReasonPoolClosed, }) } - return nil, ErrPoolDisconnected - } - - err := p.sem.Acquire(ctx, 1) - if err != nil { + return nil, ErrPoolClosed + case poolPaused: + err := poolClearedError{err: p.lastClearErr, address: p.address} + p.stateMu.RUnlock() if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), - Reason: event.ReasonTimedOut, + Reason: event.ReasonConnectionErrored, }) } - errWaitQueueTimeout := WaitQueueTimeoutError{ - Wrapped: ctx.Err(), - PinnedCursorConnections: atomic.LoadUint64(&p.pinnedCursorConnections), - PinnedTransactionConnections: atomic.LoadUint64(&p.pinnedTransactionConnections), - maxPoolSize: p.conns.maxSize, - } - return nil, errWaitQueueTimeout + return nil, err } - // This loop is so that we don't end up with more than maxPoolSize connections if p.conns.Maintain runs between - // calling p.conns.Get() and making the new connection - for { - if atomic.LoadInt32(&p.connected) != connected { + if ctx == nil { + ctx = context.Background() + } + + // Create a wantConn, which we will use to request an existing idle or new connection. Always + // cancel the wantConn if checkOut() returned an error to make sure any delivered connections + // are returned to the pool (e.g. if a connection was delivered immediately after the Context + // timed out). + w := newWantConn() + defer func() { + if err != nil { + w.cancel(p, err) + } + }() + + // Get in the queue for an idle connection. If getOrQueueForIdleConn returns true, it was able to + // immediately deliver an idle connection to the wantConn, so we can return the connection or + // error from the wantConn without waiting for "ready". + if delivered := p.getOrQueueForIdleConn(w); delivered { + // If delivered = true, we didn't enter the wait queue and will return either a connection + // or an error, so unlock the stateMu lock here. + p.stateMu.RUnlock() + + if w.err != nil { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), - Reason: event.ReasonPoolClosed, + Reason: event.ReasonConnectionErrored, }) } - p.sem.Release(1) - return nil, ErrPoolDisconnected + return nil, w.err } - connVal := p.conns.Get() - if c, ok := connVal.(*connection); ok && connVal != nil { - // call connect if not connected - if atomic.LoadInt32(&c.connected) == initialized { - c.connect(ctx) - } - - err := c.wait() - if err != nil { - // Call removeConnection to remove the connection reference and emit a ConnectionClosed event. 
- _ = p.removeConnection(c, event.ReasonConnectionErrored) - p.conns.decrementTotal() - p.sem.Release(1) - - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonConnectionErrored, - }) - } - return nil, err - } - - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.GetSucceeded, - Address: p.address.String(), - ConnectionID: c.poolID, - }) - } - return c, nil + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.GetSucceeded, + Address: p.address.String(), + ConnectionID: w.conn.poolID, + }) } + return w.conn, nil + } - select { - case <-ctx.Done(): + // If we didn't get an immediately available idle connection, also get in the queue for a new + // connection while we're waiting for an idle connection. + p.queueForNewConn(w) + p.stateMu.RUnlock() + + // Wait for either the wantConn to be ready or for the Context to time out. + select { + case <-w.ready: + if w.err != nil { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), - Reason: event.ReasonTimedOut, + Reason: event.ReasonConnectionErrored, }) } - p.sem.Release(1) - return nil, ctx.Err() - default: - // The pool is empty, so we try to make a new connection. If incrementTotal fails, the resource pool has - // more resources than we previously thought, so we try to get a resource again. - made := p.conns.incrementTotal() - if !made { - continue - } - c, reason, err := p.makeNewConnection() - - if err != nil { - if p.monitor != nil { - // We only publish a GetFailed event because makeNewConnection has already published - // ConnectionClosed if needed. - p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: reason, - }) - } - p.conns.decrementTotal() - p.sem.Release(1) - return nil, err - } - - c.connect(ctx) - // wait for conn to be connected - err = c.wait() - if err != nil { - // Call removeConnection to remove the connection reference and fire a ConnectionClosedEvent. - _ = p.removeConnection(c, event.ReasonConnectionErrored) - p.conns.decrementTotal() - p.sem.Release(1) - - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonConnectionErrored, - }) - } - return nil, err - } + return nil, w.err + } - if p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.GetSucceeded, - Address: p.address.String(), - ConnectionID: c.poolID, - }) - } - return c, nil + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.GetSucceeded, + Address: p.address.String(), + ConnectionID: w.conn.poolID, + }) + } + return w.conn, nil + case <-ctx.Done(): + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.GetFailed, + Address: p.address.String(), + Reason: event.ReasonTimedOut, + }) + } + return nil, WaitQueueTimeoutError{ + Wrapped: ctx.Err(), + PinnedCursorConnections: atomic.LoadUint64(&p.pinnedCursorConnections), + PinnedTransactionConnections: atomic.LoadUint64(&p.pinnedTransactionConnections), + maxPoolSize: p.maxSize, + totalConnectionCount: p.totalConnectionCount(), } } } -// closeConnection closes a connection, not the pool itself. This method will actually closeConnection the connection, -// making it unusable, to instead return the connection to the pool, use put. -func (p *pool) closeConnection(c *connection) error { - if c.pool != p { +// closeConnection closes a connection. 
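// Editorial sketch (hypothetical, not part of this patch): how a caller is expected to pair
// checkOut above with checkIn, defined further below. The connection is always handed back,
// even when the work fails, so the pool can either reuse it or remove and close it. The helper
// name withPooledConn is assumed for illustration only.
func withPooledConn(ctx context.Context, p *pool, work func(*connection) error) error {
	conn, err := p.checkOut(ctx)
	if err != nil {
		return err // e.g. ErrPoolClosed, poolClearedError, or WaitQueueTimeoutError
	}
	defer func() { _ = p.checkIn(conn) }()
	return work(conn)
}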
+func (p *pool) closeConnection(conn *connection) error { + if conn.pool != p { return ErrWrongPool } - if atomic.LoadInt32(&c.connected) == connected { - c.closeConnectContext() - _ = c.wait() // Make sure that the connection has finished connecting + if atomic.LoadInt64(&conn.state) == connConnected { + conn.closeConnectContext() + conn.wait() // Make sure that the connection has finished connecting. } - if !atomic.CompareAndSwapInt32(&c.connected, connected, disconnected) { - return nil // We're closing an already closed connection - } - - if c.nc != nil { - err := c.nc.Close() - if err != nil { - return ConnectionError{ConnectionID: c.id, Wrapped: err, message: "failed to close net.Conn"} - } + err := conn.close() + if err != nil { + return ConnectionError{ConnectionID: conn.id, Wrapped: err, message: "failed to close net.Conn"} } return nil @@ -522,80 +522,608 @@ func (p *pool) getGenerationForNewConnection(serviceID *primitive.ObjectID) uint return p.generation.addConnection(serviceID) } -// removeConnection removes a connection from the pool. -func (p *pool) removeConnection(c *connection, reason string) error { - if c.pool != p { - return ErrWrongPool +// removeConnection removes a connection from the pool and emits a "ConnectionClosed" event. +func (p *pool) removeConnection(conn *connection, reason string) error { + if conn == nil { + return nil } - var publishEvent bool - p.Lock() - if _, ok := p.opened[c.poolID]; ok { - publishEvent = true - delete(p.opened, c.poolID) + if conn.pool != p { + return ErrWrongPool } - p.Unlock() - // Only update the generation numbers map if the connection has retrieved its generation number. Otherwise, we'd - // decrement the count for the generation even though it had never been incremented. - if c.hasGenerationNumber() { - c.descMu.RLock() - serviceID := c.desc.ServiceID - c.descMu.RUnlock() - p.generation.removeConnection(serviceID) + p.createConnectionsCond.L.Lock() + _, ok := p.conns[conn.poolID] + if !ok { + // If the connection has been removed from the pool already, exit without doing any + // additional state changes. + p.createConnectionsCond.L.Unlock() + return nil + } + delete(p.conns, conn.poolID) + // Signal the createConnectionsCond so any goroutines waiting for a new connection slot in the + // pool will proceed. + p.createConnectionsCond.Signal() + p.createConnectionsCond.L.Unlock() + + // Only update the generation numbers map if the connection has retrieved its generation number. + // Otherwise, we'd decrement the count for the generation even though it had never been + // incremented. + if conn.hasGenerationNumber() { + p.generation.removeConnection(conn.desc.ServiceID) } - if publishEvent && p.monitor != nil { - c.pool.monitor.Event(&event.PoolEvent{ + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionClosed, - Address: c.pool.address.String(), - ConnectionID: c.poolID, + Address: p.address.String(), + ConnectionID: conn.poolID, Reason: reason, }) } + return nil } -// put returns a connection to this pool. If the pool is connected, the connection is not -// stale, and there is space in the cache, the connection is returned to the cache. This -// assumes that the connection has already been counted in p.conns.totalSize. -func (p *pool) put(c *connection) error { - defer p.sem.Release(1) +// checkIn returns an idle connection to the pool. If the connection is perished or the pool is +// closed, it is removed from the connection pool and closed. 
+func (p *pool) checkIn(conn *connection) error { + if conn == nil { + return nil + } + if conn.pool != p { + return ErrWrongPool + } + if p.monitor != nil { - var cid uint64 - var addr string - if c != nil { - cid = c.poolID - addr = c.addr.String() - } p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionReturned, - ConnectionID: cid, - Address: addr, + ConnectionID: conn.poolID, + Address: conn.addr.String(), }) } - if c == nil { + return p.checkInNoEvent(conn) +} + +// checkInNoEvent returns a connection to the pool. It behaves identically to checkIn except it does +// not publish events. It is only intended for use by pool-internal functions. +func (p *pool) checkInNoEvent(conn *connection) error { + if conn == nil { return nil } - - if c.pool != p { + if conn.pool != p { return ErrWrongPool } - _ = p.conns.Put(c) + // Bump the connection idle deadline here because we're about to make the connection "available". + // The idle deadline is used to determine when a connection has reached its max idle time and + // should be closed. A connection reaches its max idle time when it has been "available" in the + // idle connections stack for more than the configured duration (maxIdleTimeMS). Set it before + // we call connectionPerished(), which checks the idle deadline, because a newly "available" + // connection should never be perished due to max idle time. + conn.bumpIdleDeadline() + + if reason, perished := connectionPerished(conn); perished { + _ = p.removeConnection(conn, reason) + go func() { + _ = p.closeConnection(conn) + }() + return nil + } + + if conn.pool.getState() == poolClosed { + _ = p.removeConnection(conn, event.ReasonPoolClosed) + go func() { + _ = p.closeConnection(conn) + }() + return nil + } + + p.idleMu.Lock() + defer p.idleMu.Unlock() + for { + w := p.idleConnWait.popFront() + if w == nil { + break + } + if w.tryDeliver(conn, nil) { + return nil + } + } + + for _, idle := range p.idleConns { + if idle == conn { + return fmt.Errorf("duplicate idle conn %p in idle connections stack", conn) + } + } + + p.idleConns = append(p.idleConns, conn) return nil } -// clear clears the pool by incrementing the generation -func (p *pool) clear(serviceID *primitive.ObjectID) { - if p.monitor != nil { +// clear marks all connections as stale by incrementing the generation number, stops all background +// goroutines, removes all requests from idleConnWait and newConnWait, and sets the pool state to +// "paused". If serviceID is nil, clear marks all connections as stale. If serviceID is not nil, +// clear marks only connections associated with the given serviceID stale (for use in load balancer +// mode). +func (p *pool) clear(err error, serviceID *primitive.ObjectID) { + if p.getState() == poolClosed { + return + } + + p.generation.clear(serviceID) + + // If serviceID is nil (i.e. not in load balancer mode), transition the pool to a paused state + // by stopping all background goroutines, clearing the wait queues, and setting the pool state + // to "paused". + sendEvent := true + if serviceID == nil { + // While holding the stateMu lock, set the pool state to "paused" if it's currently "ready", + // and set lastClearErr to the error that caused the pool to be cleared. If the pool is + // already paused, don't send another "ConnectionPoolCleared" event. 
+ p.stateMu.Lock() + if p.state == poolPaused { + sendEvent = false + } + if p.state == poolReady { + p.state = poolPaused + } + p.lastClearErr = err + p.stateMu.Unlock() + + pcErr := poolClearedError{err: err, address: p.address} + + // Clear the idle connections wait queue. + p.idleMu.Lock() + for { + w := p.idleConnWait.popFront() + if w == nil { + break + } + w.tryDeliver(nil, pcErr) + } + p.idleMu.Unlock() + + // Clear the new connections wait queue. This effectively pauses the createConnections() + // background goroutine because newConnWait is empty and checkOut() won't insert any more + // wantConns into newConnWait until the pool is marked "ready" again. + p.createConnectionsCond.L.Lock() + for { + w := p.newConnWait.popFront() + if w == nil { + break + } + w.tryDeliver(nil, pcErr) + } + p.createConnectionsCond.L.Unlock() + } + + if sendEvent && p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.PoolCleared, Address: p.address.String(), ServiceID: serviceID, }) } - p.generation.clear(serviceID) +} + +// getOrQueueForIdleConn attempts to deliver an idle connection to the given wantConn. If there is +// an idle connection in the idle connections stack, it pops an idle connection, delivers it to the +// wantConn, and returns true. If there are no idle connections in the idle connections stack, it +// adds the wantConn to the idleConnWait queue and returns false. +func (p *pool) getOrQueueForIdleConn(w *wantConn) bool { + p.idleMu.Lock() + defer p.idleMu.Unlock() + + // Try to deliver an idle connection from the idleConns stack first. + for len(p.idleConns) > 0 { + conn := p.idleConns[len(p.idleConns)-1] + p.idleConns = p.idleConns[:len(p.idleConns)-1] + + if conn == nil { + continue + } + + if reason, perished := connectionPerished(conn); perished { + _ = conn.pool.removeConnection(conn, reason) + go func() { + _ = conn.pool.closeConnection(conn) + }() + continue + } + + if !w.tryDeliver(conn, nil) { + // If we couldn't deliver the conn to w, put it back in the idleConns stack. + p.idleConns = append(p.idleConns, conn) + } + + // If we got here, we tried to deliver an idle conn to w. No matter if tryDeliver() returned + // true or false, w is no longer waiting and doesn't need to be added to any wait queues, so + // return delivered = true. + return true + } + + p.idleConnWait.cleanFront() + p.idleConnWait.pushBack(w) + return false +} + +func (p *pool) queueForNewConn(w *wantConn) { + p.createConnectionsCond.L.Lock() + defer p.createConnectionsCond.L.Unlock() + + p.newConnWait.cleanFront() + p.newConnWait.pushBack(w) + p.createConnectionsCond.Signal() +} + +func (p *pool) totalConnectionCount() int { + p.createConnectionsCond.L.Lock() + defer p.createConnectionsCond.L.Unlock() + + return len(p.conns) +} + +func (p *pool) availableConnectionCount() int { + p.idleMu.Lock() + defer p.idleMu.Unlock() + + return len(p.idleConns) +} + +// createConnections creates connections for wantConn requests on the newConnWait queue. +func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { + defer wg.Done() + + // condition returns true if the createConnections() loop should continue and false if it should + // wait. Note that the condition also listens for Context cancellation, which also causes the + // loop to continue, allowing for a subsequent check to return from createConnections(). 
+ condition := func() bool { + checkOutWaiting := p.newConnWait.len() > 0 + poolHasSpace := p.maxSize == 0 || uint64(len(p.conns)) < p.maxSize + cancelled := ctx.Err() != nil + return (checkOutWaiting && poolHasSpace) || cancelled + } + + // wait waits for there to be an available wantConn and for the pool to have space for a new + // connection. When the condition becomes true, it creates a new connection and returns the + // waiting wantConn and new connection. If the Context is cancelled or there are any + // errors, wait returns with "ok = false". + wait := func() (*wantConn, *connection, bool) { + p.createConnectionsCond.L.Lock() + defer p.createConnectionsCond.L.Unlock() + + for !condition() { + p.createConnectionsCond.Wait() + } + + if ctx.Err() != nil { + return nil, nil, false + } + + p.newConnWait.cleanFront() + w := p.newConnWait.popFront() + if w == nil { + return nil, nil, false + } + + conn := newConnection(p.address, p.connOpts...) + conn.pool = p + conn.poolID = atomic.AddUint64(&p.nextID, 1) + p.conns[conn.poolID] = conn + + return w, conn, true + } + + for ctx.Err() == nil { + w, conn, ok := wait() + if !ok { + continue + } + + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.ConnectionCreated, + Address: p.address.String(), + ConnectionID: conn.poolID, + }) + } + + // Pass the createConnections context to connect to allow pool close to cancel connection + // establishment so shutdown doesn't block indefinitely if connectTimeout=0. + err := conn.connect(ctx) + if err != nil { + w.tryDeliver(nil, err) + + // If there's an error connecting the new connection, call the handshake error handler + // that implements the SDAM handshake error handling logic. This must be called after + // delivering the connection error to the waiting wantConn. If it's called before, the + // handshake error handler may clear the connection pool, leading to a different error + // message being delivered to the same waiting wantConn in idleConnWait when the wait + // queues are cleared. + if p.handshakeErrFn != nil { + p.handshakeErrFn(err, conn.generation, conn.desc.ServiceID) + } + + _ = p.removeConnection(conn, event.ReasonError) + _ = p.closeConnection(conn) + continue + } + + if p.monitor != nil { + p.monitor.Event(&event.PoolEvent{ + Type: event.ConnectionReady, + Address: p.address.String(), + ConnectionID: conn.poolID, + }) + } + + if w.tryDeliver(conn, nil) { + continue + } + + _ = p.checkInNoEvent(conn) + } +} + +func (p *pool) maintain(ctx context.Context, wg *sync.WaitGroup) { + defer wg.Done() + + ticker := time.NewTicker(p.maintainInterval) + defer ticker.Stop() + + // remove removes the *wantConn at index i from the slice and returns the new slice. The order + // of the slice is not maintained. + remove := func(arr []*wantConn, i int) []*wantConn { + end := len(arr) - 1 + arr[i], arr[end] = arr[end], arr[i] + return arr[:end] + } + + // removeNotWaiting removes any wantConns that are no longer waiting from given slice of + // wantConns. That allows maintain() to use the size of its wantConns slice as an indication of + // how many new connection requests are outstanding and subtract that from the number of + // connections to ask for when maintaining minPoolSize. 
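// Editorial sketch (hypothetical helper, not part of this patch): the arithmetic maintain()
// applies further below when topping the pool up to minPoolSize. The request count is the
// deficit below minPoolSize minus the wantConns still outstanding from earlier ticks, capped
// at 10 per tick. For example, minSize=20, total=12, outstanding=3 requests 5 new connections.
func connectionsToRequest(minSize uint64, total, outstanding int) int {
	n := int(minSize) - total - outstanding
	if n > 10 {
		n = 10
	}
	if n < 0 {
		n = 0 // maintain() gets the same effect because its request loop never runs for n <= 0
	}
	return n
}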
+ removeNotWaiting := func(arr []*wantConn) []*wantConn { + for i := len(arr) - 1; i >= 0; i-- { + w := arr[i] + if !w.waiting() { + arr = remove(arr, i) + } + } + + return arr + } + + wantConns := make([]*wantConn, 0, p.minSize) + defer func() { + for _, w := range wantConns { + w.tryDeliver(nil, ErrPoolClosed) + } + }() + + for { + select { + case <-ticker.C: + case <-p.maintainReady: + case <-ctx.Done(): + return + } + + // Only maintain the pool while it's in the "ready" state. If the pool state is not "ready", + // wait for the next tick or "ready" signal. Do all of this while holding the stateMu read + // lock to prevent a state change between checking the state and entering the wait queue. + // Not holding the stateMu read lock here may allow maintain() to request wantConns after + // clear() pauses the pool and clears the wait queue, resulting in createConnections() + // doing work while the pool is "paused". + p.stateMu.RLock() + if p.state != poolReady { + p.stateMu.RUnlock() + continue + } + + p.removePerishedConns() + + // Remove any wantConns that are no longer waiting. + wantConns = removeNotWaiting(wantConns) + + // Figure out how many more wantConns we need to satisfy minPoolSize. Assume that the + // outstanding wantConns (i.e. the ones that weren't removed from the slice) will all return + // connections when they're ready, so only add wantConns to make up the difference. Limit + // the number of connections requested to max 10 at a time to prevent overshooting + // minPoolSize in case other checkOut() calls are requesting new connections, too. + total := p.totalConnectionCount() + n := int(p.minSize) - total - len(wantConns) + if n > 10 { + n = 10 + } + + for i := 0; i < n; i++ { + w := newWantConn() + p.queueForNewConn(w) + wantConns = append(wantConns, w) + + // Start a goroutine for each new wantConn, waiting for it to be ready. + go func() { + <-w.ready + if w.conn != nil { + _ = p.checkInNoEvent(w.conn) + } + }() + } + p.stateMu.RUnlock() + } +} + +func (p *pool) removePerishedConns() { + p.idleMu.Lock() + defer p.idleMu.Unlock() + + for i := range p.idleConns { + conn := p.idleConns[i] + if conn == nil { + continue + } + + if reason, perished := connectionPerished(conn); perished { + p.idleConns[i] = nil + + _ = p.removeConnection(conn, reason) + go func() { + _ = p.closeConnection(conn) + }() + } + } + + p.idleConns = compact(p.idleConns) +} + +// compact removes any nil pointers from the slice and keeps the non-nil pointers, retaining the +// order of the non-nil pointers. +func compact(arr []*connection) []*connection { + offset := 0 + for i := range arr { + if arr[i] == nil { + continue + } + arr[offset] = arr[i] + offset++ + } + return arr[:offset] +} + +// A wantConn records state about a wanted connection (that is, an active call to checkOut). +// The conn may be gotten by creating a new connection or by finding an idle connection, or a +// cancellation may make the conn no longer wanted. These three options are racing against each +// other and use wantConn to coordinate and agree about the winning outcome. +// Based on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1174-1240 +type wantConn struct { + ready chan struct{} + + mu sync.Mutex // Guards conn, err + conn *connection + err error +} + +func newWantConn() *wantConn { + return &wantConn{ + ready: make(chan struct{}, 1), + } +} + +// waiting reports whether w is still waiting for an answer (connection or error). 
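// Editorial sketch (hypothetical, not part of this patch): the deliver-exactly-once idiom that
// tryDeliver and cancel below are built on - a mutex-guarded flag plus a channel that is closed
// once, so the idle-connection path, the new-connection path, and cancellation agree on a
// single winning outcome.
type deliverOnce struct {
	mu    sync.Mutex
	done  bool
	ready chan struct{} // closed exactly once, when an outcome is decided
}

func (d *deliverOnce) tryDeliver() bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.done {
		return false // another path already won the race
	}
	d.done = true
	close(d.ready)
	return true
}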
+func (w *wantConn) waiting() bool { + select { + case <-w.ready: + return false + default: + return true + } +} + +// tryDeliver attempts to deliver conn, err to w and reports whether it succeeded. +func (w *wantConn) tryDeliver(conn *connection, err error) bool { + w.mu.Lock() + defer w.mu.Unlock() + + if w.conn != nil || w.err != nil { + return false + } + + w.conn = conn + w.err = err + if w.conn == nil && w.err == nil { + panic("x/mongo/driver/topology: internal error: misuse of tryDeliver") + } + close(w.ready) + return true +} + +// cancel marks w as no longer wanting a result (for example, due to cancellation). If a connection +// has been delivered already, cancel returns it with p.checkInNoEvent(). Note that the caller must +// not hold any locks on the pool while calling cancel. +func (w *wantConn) cancel(p *pool, err error) { + if err == nil { + panic("x/mongo/driver/topology: internal error: misuse of cancel") + } + + w.mu.Lock() + if w.conn == nil && w.err == nil { + close(w.ready) // catch misbehavior in future delivery + } + conn := w.conn + w.conn = nil + w.err = err + w.mu.Unlock() + + if conn != nil { + _ = p.checkInNoEvent(conn) + } +} + +// A wantConnQueue is a queue of wantConns. +// Based on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1242-1306 +type wantConnQueue struct { + // This is a queue, not a deque. + // It is split into two stages - head[headPos:] and tail. + // popFront is trivial (headPos++) on the first stage, and + // pushBack is trivial (append) on the second stage. + // If the first stage is empty, popFront can swap the + // first and second stages to remedy the situation. + // + // This two-stage split is analogous to the use of two lists + // in Okasaki's purely functional queue but without the + // overhead of reversing the list when swapping stages. + head []*wantConn + headPos int + tail []*wantConn +} + +// len returns the number of items in the queue. +func (q *wantConnQueue) len() int { + return len(q.head) - q.headPos + len(q.tail) +} + +// pushBack adds w to the back of the queue. +func (q *wantConnQueue) pushBack(w *wantConn) { + q.tail = append(q.tail, w) +} + +// popFront removes and returns the wantConn at the front of the queue. +func (q *wantConnQueue) popFront() *wantConn { + if q.headPos >= len(q.head) { + if len(q.tail) == 0 { + return nil + } + // Pick up tail as new head, clear tail. + q.head, q.headPos, q.tail = q.tail, 0, q.head[:0] + } + w := q.head[q.headPos] + q.head[q.headPos] = nil + q.headPos++ + return w +} + +// peekFront returns the wantConn at the front of the queue without removing it. +func (q *wantConnQueue) peekFront() *wantConn { + if q.headPos < len(q.head) { + return q.head[q.headPos] + } + if len(q.tail) > 0 { + return q.tail[0] + } + return nil +} + +// cleanFront pops any wantConns that are no longer waiting from the head of the queue. 
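// Editorial sketch (hypothetical, not part of this patch): the two-stage FIFO described for
// wantConnQueue above, shown standalone for plain ints. pushBack appends to the tail stage;
// popFront consumes the head stage and swaps the stages only when the head is exhausted, so
// both operations are amortized O(1) and no list reversal is needed.
type twoStageQueue struct {
	head    []int
	headPos int
	tail    []int
}

func (q *twoStageQueue) pushBack(v int) { q.tail = append(q.tail, v) }

func (q *twoStageQueue) popFront() (int, bool) {
	if q.headPos >= len(q.head) {
		if len(q.tail) == 0 {
			return 0, false
		}
		// Pick up the tail as the new head and recycle the old head's backing array.
		q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
	}
	v := q.head[q.headPos]
	q.headPos++
	return v, true
}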
+func (q *wantConnQueue) cleanFront() { + for { + w := q.peekFront() + if w == nil || w.waiting() { + return + } + q.popFront() + } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go index 85fa58691..47fac2f61 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go @@ -13,6 +13,12 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" ) +// Pool generation state constants. +const ( + generationDisconnected int64 = iota + generationConnected +) + // generationStats represents the version of a pool. It tracks the generation number as well as the number of // connections that have been created in the generation. type generationStats struct { @@ -24,8 +30,10 @@ type generationStats struct { // load balancer, there is only one service ID: primitive.NilObjectID. For load-balanced deployments, each server behind // the load balancer will have a unique service ID. type poolGenerationMap struct { - // state must be accessed using the atomic package. - state int32 + // state must be accessed using the atomic package and should be at the beginning of the struct. + // - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG + // - suggested layout: https://go101.org/article/memory-layout.html + state int64 generationMap map[primitive.ObjectID]*generationStats sync.Mutex @@ -40,11 +48,11 @@ func newPoolGenerationMap() *poolGenerationMap { } func (p *poolGenerationMap) connect() { - atomic.StoreInt32(&p.state, connected) + atomic.StoreInt64(&p.state, generationConnected) } func (p *poolGenerationMap) disconnect() { - atomic.StoreInt32(&p.state, disconnected) + atomic.StoreInt64(&p.state, generationDisconnected) } // addConnection increments the connection count for the generation associated with the given service ID and returns the @@ -100,7 +108,7 @@ func (p *poolGenerationMap) clear(serviceIDPtr *primitive.ObjectID) { func (p *poolGenerationMap) stale(serviceIDPtr *primitive.ObjectID, knownGeneration uint64) bool { // If the map has been disconnected, all connections should be considered stale to ensure that they're closed. - if atomic.LoadInt32(&p.state) == disconnected { + if atomic.LoadInt64(&p.state) == generationDisconnected { return true } @@ -125,6 +133,17 @@ func (p *poolGenerationMap) getGeneration(serviceIDPtr *primitive.ObjectID) uint return 0 } +func (p *poolGenerationMap) getNumConns(serviceIDPtr *primitive.ObjectID) uint64 { + serviceID := getServiceID(serviceIDPtr) + p.Lock() + defer p.Unlock() + + if stats, ok := p.generationMap[serviceID]; ok { + return stats.numConns + } + return 0 +} + func getServiceID(oid *primitive.ObjectID) primitive.ObjectID { if oid == nil { return primitive.NilObjectID diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/resource_pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/resource_pool.go deleted file mode 100644 index b7fcf3104..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/resource_pool.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package topology - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -// expiredFunc is the function type used for testing whether or not resources in a resourcePool have stale. It should -// return true if the resource has stale and can be removed from the pool. -type expiredFunc func(interface{}) bool - -// closeFunc is the function type used to closeConnection resources in a resourcePool. The pool will always call this function -// asynchronously -type closeFunc func(interface{}) - -// initFunc is the function used to add a resource to the resource pool to maintain minimum size. It returns a new -// resource each time it is called. -type initFunc func() interface{} - -type resourcePoolConfig struct { - MaxSize uint64 - MinSize uint64 - MaintainInterval time.Duration - ExpiredFn expiredFunc - CloseFn closeFunc - InitFn initFunc -} - -// setup sets defaults in the rpc and checks that the given values are valid -func (rpc *resourcePoolConfig) setup() error { - if rpc.ExpiredFn == nil { - return fmt.Errorf("an ExpiredFn is required to create a resource pool") - } - if rpc.CloseFn == nil { - return fmt.Errorf("an CloseFn is required to create a resource pool") - } - if rpc.MaintainInterval == time.Duration(0) { - return fmt.Errorf("unable to have MaintainInterval time of %v", rpc.MaintainInterval) - } - return nil -} - -// resourcePoolElement is a link list element -type resourcePoolElement struct { - next, prev *resourcePoolElement - value interface{} -} - -// resourcePool is a concurrent resource pool -type resourcePool struct { - start, end *resourcePoolElement - size, minSize, maxSize, totalSize uint64 - expiredFn expiredFunc - closeFn closeFunc - initFn initFunc - maintainTimer *time.Timer - maintainInterval time.Duration - closed bool - - sync.Mutex -} - -// NewResourcePool creates a new resourcePool instance that is capped to maxSize resources. -// If maxSize is 0, the pool size will be unbounded. -func newResourcePool(config resourcePoolConfig) (*resourcePool, error) { - err := (&config).setup() - if err != nil { - return nil, err - } - rp := &resourcePool{ - minSize: config.MinSize, - maxSize: config.MaxSize, - expiredFn: config.ExpiredFn, - closeFn: config.CloseFn, - initFn: config.InitFn, - maintainInterval: config.MaintainInterval, - } - - return rp, nil -} - -func (rp *resourcePool) initialize() { - rp.Lock() - rp.maintainTimer = time.AfterFunc(rp.maintainInterval, rp.Maintain) - rp.Unlock() - - rp.Maintain() -} - -// add will add a new rpe to the pool, requires that the resource pool is locked -func (rp *resourcePool) add(e *resourcePoolElement) { - if e == nil { - e = &resourcePoolElement{ - value: rp.initFn(), - } - } - - e.next = rp.start - if rp.start != nil { - rp.start.prev = e - } - rp.start = e - if rp.end == nil { - rp.end = e - } - atomic.AddUint64(&rp.size, 1) -} - -// Get returns the first un-stale resource from the pool. If no such resource can be found, nil is returned. 
-func (rp *resourcePool) Get() interface{} { - rp.Lock() - defer rp.Unlock() - - for rp.start != nil { - curr := rp.start - rp.remove(curr) - if !rp.expiredFn(curr.value) { - return curr.value - } - rp.closeFn(curr.value) - rp.totalSize-- - } - return nil -} - -func (rp *resourcePool) incrementTotal() bool { - rp.Lock() - defer rp.Unlock() - if rp.maxSize > 0 && rp.totalSize >= rp.maxSize { - return false - } - rp.totalSize++ - return true -} - -func (rp *resourcePool) decrementTotal() { - rp.Lock() - defer rp.Unlock() - rp.totalSize-- -} - -func (rp *resourcePool) clearTotal() { - rp.Lock() - defer rp.Unlock() - rp.totalSize = 0 -} - -// Put puts the resource back into the pool if it will not exceed the max size of the pool. -// This assumes that v has already been accounted for by rp.totalSize -func (rp *resourcePool) Put(v interface{}) bool { - rp.Lock() - defer rp.Unlock() - if rp.expiredFn(v) { - rp.closeFn(v) - rp.totalSize-- - return false - } - - rp.add(&resourcePoolElement{value: v}) - return true -} - -// remove removes a rpe from the linked list. Requires that the pool be locked -func (rp *resourcePool) remove(e *resourcePoolElement) { - if e == nil { - return - } - - if e.prev != nil { - e.prev.next = e.next - } - if e.next != nil { - e.next.prev = e.prev - } - if e == rp.start { - rp.start = e.next - } - if e == rp.end { - rp.end = e.prev - } - atomicSubtract1Uint64(&rp.size) -} - -// Maintain puts the pool back into a state of having a correct number of resources if possible and removes all stale resources -func (rp *resourcePool) Maintain() { - rp.Lock() - defer rp.Unlock() - - if rp.closed { - return - } - - for curr := rp.end; curr != nil; curr = curr.prev { - if rp.expiredFn(curr.value) { - rp.remove(curr) - rp.closeFn(curr.value) - rp.totalSize-- - } - } - - for rp.totalSize < rp.minSize { - rp.add(nil) - rp.totalSize++ - } - - // reset the timer for the background cleanup routine - if rp.maintainTimer == nil { - rp.maintainTimer = time.AfterFunc(rp.maintainInterval, rp.Maintain) - } - if !rp.maintainTimer.Stop() { - rp.maintainTimer = time.AfterFunc(rp.maintainInterval, rp.Maintain) - return - } - rp.maintainTimer.Reset(rp.maintainInterval) -} - -// Close clears the pool and stops the background maintenance routine. -func (rp *resourcePool) Close() { - rp.Lock() - defer rp.Unlock() - - // Clear the resources in the pool. - for ; rp.start != nil; rp.start = rp.start.next { - rp.closeFn(rp.start.value) - rp.totalSize-- - } - atomic.StoreUint64(&rp.size, 0) - rp.end = nil - - // Stop the maintenance timer. If it's already fired, a call to Maintain might be waiting for the lock to be - // released, so we set closed to make that call a no-op. 
- rp.closed = true - _ = rp.maintainTimer.Stop() -} - -func atomicSubtract1Uint64(p *uint64) { - if p == nil || atomic.LoadUint64(p) == 0 { - return - } - - for { - expected := atomic.LoadUint64(p) - if atomic.CompareAndSwapUint64(p, expected, expected-1) { - return - } - } -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 40911126b..d91290e99 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -8,6 +8,7 @@ package topology import ( "context" + "math" "sync" "time" @@ -17,28 +18,43 @@ import ( const ( rttAlphaValue = 0.2 + minSamples = 5 + maxSamples = 500 ) type rttConfig struct { interval time.Duration - createConnectionFn func() (*connection, error) - createOperationFn func(driver.Connection) *operation.IsMaster + minRTTWindow time.Duration // Window size to calculate minimum RTT over. + createConnectionFn func() *connection + createOperationFn func(driver.Connection) *operation.Hello } type rttMonitor struct { - sync.Mutex - conn *connection + mu sync.RWMutex // mu guards samples, offset, minRTT, averageRTT, and averageRTTSet + samples []time.Duration + offset int + minRTT time.Duration averageRTT time.Duration averageRTTSet bool - closeWg sync.WaitGroup - cfg *rttConfig - ctx context.Context - cancelFn context.CancelFunc + + closeWg sync.WaitGroup + cfg *rttConfig + ctx context.Context + cancelFn context.CancelFunc } -func newRttMonitor(cfg *rttConfig) *rttMonitor { +func newRTTMonitor(cfg *rttConfig) *rttMonitor { + if cfg.interval <= 0 { + panic("RTT monitor interval must be greater than 0") + } + ctx, cancel := context.WithCancel(context.Background()) + // Determine the number of samples we need to keep to store the minWindow of RTT durations. The + // number of samples must be between [5, 500]. + numSamples := int(math.Max(minSamples, math.Min(maxSamples, float64((cfg.minRTTWindow)/cfg.interval)))) + return &rttMonitor{ + samples: make([]time.Duration, numSamples), cfg: cfg, ctx: ctx, cancelFn: cancel, @@ -53,21 +69,6 @@ func (r *rttMonitor) connect() { func (r *rttMonitor) disconnect() { // Signal for the routine to stop. r.cancelFn() - - var conn *connection - r.Lock() - conn = r.conn - r.Unlock() - - if conn != nil { - // If the connection exists, we need to wait for it to be connected. We can ignore the error from conn.wait(). - // If the connection wasn't successfully opened, its state was set back to disconnected, so calling conn.close() - // will be a noop. - conn.closeConnectContext() - _ = conn.wait() - _ = conn.close() - } - r.closeWg.Wait() } @@ -76,77 +77,89 @@ func (r *rttMonitor) start() { ticker := time.NewTicker(r.cfg.interval) defer ticker.Stop() - for { - // The context is only cancelled in disconnect() so if there's an error on it, the monitor is shutting down. - if r.ctx.Err() != nil { - return + var conn *connection + defer func() { + if conn != nil { + // If the connection exists, we need to wait for it to be connected because + // conn.connect() and conn.close() cannot be called concurrently. If the connection + // wasn't successfully opened, its state was set back to disconnected, so calling + // conn.close() will be a no-op. 
+ conn.closeConnectContext() + conn.wait() + _ = conn.close() } + }() - r.pingServer() + for { + conn = r.runHello(conn) select { case <-ticker.C: case <-r.ctx.Done(): - // Shutting down return } } } -// reset sets the average RTT to 0. This should only be called from the server monitor when an error occurs during a -// server check. Errors in the RTT monitor should not reset the average RTT. -func (r *rttMonitor) reset() { - r.Lock() - defer r.Unlock() +// runHello runs a "hello" operation using the provided connection, measures the duration, and adds +// the duration as an RTT sample and returns the connection used. If the provided connection is nil +// or closed, runHello tries to establish a new connection. If the "hello" operation returns an +// error, runHello closes the connection. +func (r *rttMonitor) runHello(conn *connection) *connection { + if conn == nil || conn.closed() { + conn := r.cfg.createConnectionFn() + + err := conn.connect(r.ctx) + if err != nil { + return nil + } - r.averageRTT = 0 - r.averageRTTSet = false -} + // If we just created a new connection, record the "hello" RTT from the new connection and + // return the new connection. Don't run another "hello" command this interval because it's + // now unnecessary. + r.addSample(conn.helloRTT) + return conn + } -func (r *rttMonitor) setupRttConnection() error { - conn, err := r.cfg.createConnectionFn() + start := time.Now() + err := r.cfg.createOperationFn(initConnection{conn}).Execute(r.ctx) if err != nil { - return err + // Errors from the RTT monitor do not reset the RTTs or update the topology, so we close the + // existing connection and recreate it on the next check. + _ = conn.close() + return nil } + r.addSample(time.Since(start)) - r.Lock() - r.conn = conn - r.Unlock() - - r.conn.connect(r.ctx) - return r.conn.wait() + return conn } -func (r *rttMonitor) pingServer() { - if r.conn == nil || r.conn.closed() { - if err := r.setupRttConnection(); err != nil { - return - } - - // Add the initial connection handshake time as an RTT sample. - r.addSample(r.conn.isMasterRTT) - return - } +// reset sets the average and min RTT to 0. This should only be called from the server monitor when an error +// occurs during a server check. Errors in the RTT monitor should not reset the RTTs. +func (r *rttMonitor) reset() { + r.mu.Lock() + defer r.mu.Unlock() - // We're using an already established connection. Issue an isMaster command to get a new RTT sample. - rttConn := initConnection{r.conn} - start := time.Now() - err := r.cfg.createOperationFn(rttConn).Execute(r.ctx) - if err != nil { - // Errors from the RTT monitor do not reset the average RTT or update the topology, so we close the existing - // connection and recreate it on the next check. - _ = r.conn.close() - return + for i := range r.samples { + r.samples[i] = 0 } - - r.addSample(time.Since(start)) + r.offset = 0 + r.minRTT = 0 + r.averageRTT = 0 + r.averageRTTSet = false } func (r *rttMonitor) addSample(rtt time.Duration) { // Lock for the duration of this method. We're doing compuationally inexpensive work very infrequently, so lock // contention isn't expected. - r.Lock() - defer r.Unlock() + r.mu.Lock() + defer r.mu.Unlock() + + r.samples[r.offset] = rtt + r.offset = (r.offset + 1) % len(r.samples) + // Set the minRTT as the minimum of all collected samples. Require at least 5 samples before + // setting minRTT to prevent noisy samples on startup from artificially increasing minRTT. 
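// Editorial sketch (hypothetical, not part of this patch): the smoothing applied to averageRTT
// a few lines below is an exponentially weighted moving average with alpha = rttAlphaValue
// (0.2). For example, a previous average of 10ms and a new 20ms sample move the average to
// 0.2*20ms + 0.8*10ms = 12ms. The function name ewmaRTT is assumed for illustration only.
func ewmaRTT(prev, sample time.Duration, alpha float64) time.Duration {
	return time.Duration(alpha*float64(sample) + (1-alpha)*float64(prev))
}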
+ r.minRTT = min(r.samples, minSamples) if !r.averageRTTSet { r.averageRTT = rtt @@ -157,9 +170,38 @@ func (r *rttMonitor) addSample(rtt time.Duration) { r.averageRTT = time.Duration(rttAlphaValue*float64(rtt) + (1-rttAlphaValue)*float64(r.averageRTT)) } +// min returns the minimum value of the slice of duration samples. Zero values are not considered +// samples and are ignored. If no samples or fewer than minSamples are found in the slice, min +// returns 0. +func min(samples []time.Duration, minSamples int) time.Duration { + count := 0 + min := time.Duration(math.MaxInt64) + for _, d := range samples { + if d > 0 { + count++ + } + if d > 0 && d < min { + min = d + } + } + if count == 0 || count < minSamples { + return 0 + } + return min +} + +// getRTT returns the exponentially weighted moving average observed round-trip time. func (r *rttMonitor) getRTT() time.Duration { - r.Lock() - defer r.Unlock() + r.mu.RLock() + defer r.mu.RUnlock() return r.averageRTT } + +// getMinRTT returns the minimum observed round-trip time over the window period. +func (r *rttMonitor) getMinRTT() time.Duration { + r.mu.RLock() + defer r.mu.RUnlock() + + return r.minRTT +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index c6f154fd6..3f8620d07 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -25,6 +25,26 @@ import ( const minHeartbeatInterval = 500 * time.Millisecond +// Server state constants. +const ( + serverDisconnected int64 = iota + serverDisconnecting + serverConnected +) + +func serverStateString(state int64) string { + switch state { + case serverDisconnected: + return "Disconnected" + case serverDisconnecting: + return "Disconnecting" + case serverConnected: + return "Connected" + } + + return "" +} + var ( // ErrServerClosed occurs when an attempt to Get a connection is made after // the server has been closed. @@ -54,37 +74,18 @@ func (ss *SelectedServer) Description() description.SelectedServer { } } -// These constants represent the connection states of a server. -const ( - disconnected int32 = iota - disconnecting - connected - connecting - initialized -) - -func connectionStateString(state int32) string { - switch state { - case 0: - return "Disconnected" - case 1: - return "Disconnecting" - case 2: - return "Connected" - case 3: - return "Connecting" - case 4: - return "Initialized" - } - - return "" -} - // Server is a single server within a topology. type Server struct { - cfg *serverConfig - address address.Address - connectionstate int32 + // The following integer fields must be accessed using the atomic package and should be at the + // beginning of the struct. + // - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG + // - suggested layout: https://go101.org/article/memory-layout.html + + state int64 + operationCount int64 + + cfg *serverConfig + address address.Address // connection related fields pool *pool @@ -129,11 +130,8 @@ type updateTopologyCallback func(description.Server) description.Server // ConnectServer creates a new Server and then initializes it using the // Connect method. func ConnectServer(addr address.Address, updateCallback updateTopologyCallback, topologyID primitive.ObjectID, opts ...ServerOption) (*Server, error) { - srvr, err := NewServer(addr, topologyID, opts...) 
- if err != nil { - return nil, err - } - err = srvr.Connect(updateCallback) + srvr := NewServer(addr, topologyID, opts...) + err := srvr.Connect(updateCallback) if err != nil { return nil, err } @@ -142,14 +140,12 @@ func ConnectServer(addr address.Address, updateCallback updateTopologyCallback, // NewServer creates a new server. The mongodb server at the address will be monitored // on an internal monitoring goroutine. -func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...ServerOption) (*Server, error) { - cfg, err := newServerConfig(opts...) - if err != nil { - return nil, err - } - +func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...ServerOption) *Server { + cfg := newServerConfig(opts...) globalCtx, globalCtxCancel := context.WithCancel(context.Background()) s := &Server{ + state: serverDisconnected, + cfg: cfg, address: addr, @@ -166,36 +162,34 @@ func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...Serv s.desc.Store(description.NewDefaultServer(addr)) rttCfg := &rttConfig{ interval: cfg.heartbeatInterval, + minRTTWindow: 5 * time.Minute, createConnectionFn: s.createConnection, createOperationFn: s.createBaseOperation, } - s.rttMonitor = newRttMonitor(rttCfg) + s.rttMonitor = newRTTMonitor(rttCfg) pc := poolConfig{ - Address: addr, - MinPoolSize: cfg.minConns, - MaxPoolSize: cfg.maxConns, - MaxIdleTime: cfg.connectionPoolMaxIdleTime, - PoolMonitor: cfg.poolMonitor, - } - - connectionOpts := make([]ConnectionOption, len(cfg.connectionOpts)) - copy(connectionOpts, cfg.connectionOpts) - connectionOpts = append(connectionOpts, withErrorHandlingCallback(s.ProcessHandshakeError)) - s.pool, err = newPool(pc, connectionOpts...) - if err != nil { - return nil, err - } - + Address: addr, + MinPoolSize: cfg.minConns, + MaxPoolSize: cfg.maxConns, + MaxConnecting: cfg.maxConnecting, + MaxIdleTime: cfg.poolMaxIdleTime, + MaintainInterval: cfg.poolMaintainInterval, + PoolMonitor: cfg.poolMonitor, + handshakeErrFn: s.ProcessHandshakeError, + } + + connectionOpts := copyConnectionOpts(cfg.connectionOpts) + s.pool = newPool(pc, connectionOpts...) s.publishServerOpeningEvent(s.address) - return s, nil + return s } // Connect initializes the Server by starting background monitoring goroutines. // This method must be called before a Server can be used. func (s *Server) Connect(updateCallback updateTopologyCallback) error { - if !atomic.CompareAndSwapInt32(&s.connectionstate, disconnected, connected) { + if !atomic.CompareAndSwapInt64(&s.state, serverDisconnected, serverConnected) { return ErrServerConnected } @@ -212,7 +206,15 @@ func (s *Server) Connect(updateCallback updateTopologyCallback) error { s.closewg.Add(1) go s.update() } - return s.pool.connect() + + // The CMAP spec describes that pools should only be marked "ready" when the server description + // is updated to something other than "Unknown". However, we maintain the previous Server + // behavior here and immediately mark the pool as ready during Connect() to simplify and speed + // up the Client startup behavior. The risk of marking a pool as ready proactively during + // Connect() is that we could attempt to create connections to a server that was configured + // erroneously until the first server check or checkOut() failure occurs, when the SDAM error + // handler would transition the Server back to "Unknown" and set the pool to "paused". + return s.pool.ready() } // Disconnect closes sockets to the server referenced by this Server. 
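// Editorial sketch (hypothetical, not part of this patch): Connect above and Disconnect below
// guard their state transitions with a single compare-and-swap on the atomically accessed
// state field, so exactly one concurrent caller performs a given transition and the others
// get ErrServerConnected or ErrServerClosed instead of racing.
func connectOnce(state *int64) error {
	if !atomic.CompareAndSwapInt64(state, serverDisconnected, serverConnected) {
		return ErrServerConnected // a second concurrent Connect loses the swap
	}
	return nil
}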
@@ -225,7 +227,7 @@ func (s *Server) Connect(updateCallback updateTopologyCallback) error { // any in flight read or write operations. If this method returns with no // errors, all connections associated with this Server have been closed. func (s *Server) Disconnect(ctx context.Context) error { - if !atomic.CompareAndSwapInt32(&s.connectionstate, connected, disconnecting) { + if !atomic.CompareAndSwapInt64(&s.state, serverConnected, serverDisconnecting) { return ErrServerClosed } @@ -240,45 +242,47 @@ func (s *Server) Disconnect(ctx context.Context) error { s.cancelCheck() s.rttMonitor.disconnect() - err := s.pool.disconnect(ctx) - if err != nil { - return err - } + s.pool.close(ctx) s.closewg.Wait() - atomic.StoreInt32(&s.connectionstate, disconnected) + atomic.StoreInt64(&s.state, serverDisconnected) return nil } // Connection gets a connection to the server. func (s *Server) Connection(ctx context.Context) (driver.Connection, error) { - - if s.pool.monitor != nil { - s.pool.monitor.Event(&event.PoolEvent{ - Type: "ConnectionCheckOutStarted", - Address: s.pool.address.String(), - }) - } - - if atomic.LoadInt32(&s.connectionstate) != connected { + if atomic.LoadInt64(&s.state) != serverConnected { return nil, ErrServerClosed } - connImpl, err := s.pool.get(ctx) + // Increment the operation count before calling checkOut to make sure that all connection + // requests are included in the operation count, including those in the wait queue. If we got an + // error instead of a connection, immediately decrement the operation count. + atomic.AddInt64(&s.operationCount, 1) + conn, err := s.pool.checkOut(ctx) if err != nil { - // The error has already been handled by connection.connect, which calls Server.ProcessHandshakeError. + atomic.AddInt64(&s.operationCount, -1) return nil, err } - return &Connection{connection: connImpl}, nil + return &Connection{ + connection: conn, + cleanupServerFn: func() { + // Decrement the operation count whenever the caller is done with the connection. Note + // that cleanupServerFn() is not called while the connection is pinned to a cursor or + // transaction, so the operation count is not decremented until the cursor is closed or + // the transaction is committed or aborted. Use an int64 instead of a uint64 to mitigate + // the impact of any possible bugs that could cause the uint64 to underflow, which would + // make the server much less selectable. + atomic.AddInt64(&s.operationCount, -1) + }, + }, nil } // ProcessHandshakeError implements SDAM error handling for errors that occur before a connection -// finishes handshaking. opCtx is the context passed to Server.Connection() and is used to determine -// whether or not an operation-scoped context deadline or cancellation was the cause of the -// handshake error; it is not used for timeout or cancellation of ProcessHandshakeError. -func (s *Server) ProcessHandshakeError(opCtx context.Context, err error, startingGenerationNumber uint64, serviceID *primitive.ObjectID) { +// finishes handshaking. +func (s *Server) ProcessHandshakeError(err error, startingGenerationNumber uint64, serviceID *primitive.ObjectID) { // Ignore the error if the server is behind a load balancer but the service ID is unknown. This indicates that the // error happened when dialing the connection or during the MongoDB handshake, so we don't know the service ID to // use for clearing the pool. 
@@ -295,67 +299,17 @@ func (s *Server) ProcessHandshakeError(opCtx context.Context, err error, startin return } - isCtxTimeoutOrCanceled := func(ctx context.Context) bool { - if ctx == nil { - return false - } - - if ctx.Err() != nil { - return true - } - - // In some networking functions, the deadline from the context is used to determine timeouts - // instead of the ctx.Done() chan closure. In that case, there can be a race condition - // between the networking functing returning an error and ctx.Err() returning an an error - // (i.e. the networking function returns an error caused by the context deadline, but - // ctx.Err() returns nil). If the operation-scoped context deadline was exceeded, assume - // operation-scoped context timeout caused the error. - if deadline, ok := ctx.Deadline(); ok && time.Now().After(deadline) { - return true - } - - return false - } - - isErrTimeoutOrCanceled := func(err error) bool { - for err != nil { - // Check for errors that implement the "net.Error" interface and self-report as timeout - // errors. Includes some "*net.OpError" errors and "context.DeadlineExceeded". - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return true - } - // Check for context cancellation. Also handle the case where the cancellation error has - // been replaced by "net.errCanceled" (which isn't exported and can't be compared - // directly) by checking the error message. - if err == context.Canceled || err.Error() == "operation was canceled" { - return true - } - - wrapper, ok := err.(interface{ Unwrap() error }) - if !ok { - break - } - err = wrapper.Unwrap() - } - - return false - } - - // Ignore errors that indicate a client-side timeout occurred when the context passed into an - // operation timed out or was canceled (i.e. errors caused by an operation-scoped timeout). - // Timeouts caused by reaching connectTimeoutMS or other non-operation-scoped timeouts should - // still clear the pool. - // TODO(GODRIVER-2038): Remove this condition when connections are no longer created with an - // operation-scoped timeout. - if isCtxTimeoutOrCanceled(opCtx) && isErrTimeoutOrCanceled(wrappedConnErr) { - return - } + // Must hold the processErrorLock while updating the server description and clearing the pool. + // Not holding the lock leads to possible out-of-order processing of pool.clear() and + // pool.ready() calls from concurrent server description updates. + s.processErrorLock.Lock() + defer s.processErrorLock.Unlock() // Since the only kind of ConnectionError we receive from pool.Get will be an initialization error, we should set // the description.Server appropriately. The description should not have a TopologyVersion because the staleness // checking logic above has already determined that this description is not stale. s.updateDescription(description.NewServerFromError(s.address, wrappedConnErr, nil)) - s.pool.clear(serviceID) + s.pool.clear(err, serviceID) s.cancelCheck() } @@ -379,7 +333,7 @@ func (s *Server) SelectedDescription() description.SelectedServer { // updated server descriptions will be sent. The channel will have a buffer // size of one, and will be pre-populated with the current description. 
func (s *Server) Subscribe() (*ServerSubscription, error) { - if atomic.LoadInt32(&s.connectionstate) != connected { + if atomic.LoadInt64(&s.state) != serverConnected { return nil, ErrSubscribeAfterClosed } ch := make(chan description.Server, 1) @@ -422,7 +376,7 @@ func getWriteConcernErrorForProcessing(err error) (*driver.WriteConcernError, bo } wcerr := writeCmdErr.WriteConcernError - if wcerr != nil && (wcerr.NodeIsRecovering() || wcerr.NotMaster()) { + if wcerr != nil && (wcerr.NodeIsRecovering() || wcerr.NotPrimary()) { return wcerr, true } return nil, false @@ -435,6 +389,9 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE return driver.NoChange } + // Must hold the processErrorLock while updating the server description and clearing the pool. + // Not holding the lock leads to possible out-of-order processing of pool.clear() and + // pool.ready() calls from concurrent server description updates. s.processErrorLock.Lock() defer s.processErrorLock.Unlock() @@ -442,10 +399,10 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE if conn.Stale() { return driver.NoChange } - // Invalidate server description if not master or node recovering error occurs. + // Invalidate server description if not primary or node recovering error occurs. // These errors can be reported as a command error or a write concern error. desc := conn.Description() - if cerr, ok := err.(driver.Error); ok && (cerr.NodeIsRecovering() || cerr.NotMaster()) { + if cerr, ok := err.(driver.Error); ok && (cerr.NodeIsRecovering() || cerr.NotPrimary()) { // ignore stale error if desc.TopologyVersion.CompareToIncoming(cerr.TopologyVersion) >= 0 { return driver.NoChange @@ -459,8 +416,9 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE // If the node is shutting down or is older than 4.2, we synchronously clear the pool if cerr.NodeIsShuttingDown() || desc.WireVersion == nil || desc.WireVersion.Max < 8 { res = driver.ConnectionPoolCleared - s.pool.clear(desc.ServiceID) + s.pool.clear(err, desc.ServiceID) } + return res } if wcerr, ok := getWriteConcernErrorForProcessing(err); ok { @@ -477,7 +435,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE // If the node is shutting down or is older than 4.2, we synchronously clear the pool if wcerr.NodeIsShuttingDown() || desc.WireVersion == nil || desc.WireVersion.Max < 8 { res = driver.ConnectionPoolCleared - s.pool.clear(desc.ServiceID) + s.pool.clear(err, desc.ServiceID) } return res } @@ -499,7 +457,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE // monitoring check. The check is cancelled last to avoid a post-cancellation reconnect racing with // updateDescription. s.updateDescription(description.NewServerFromError(s.address, err, nil)) - s.pool.clear(desc.ServiceID) + s.pool.clear(err, desc.ServiceID) s.cancelCheck() return driver.ConnectionPoolCleared } @@ -577,7 +535,7 @@ func (s *Server) update() { // Perform the next check. desc, err := s.check() if err == errCheckCancelled { - if atomic.LoadInt32(&s.connectionstate) != connected { + if atomic.LoadInt64(&s.state) != serverConnected { continue } @@ -587,13 +545,18 @@ func (s *Server) update() { continue } + // Must hold the processErrorLock while updating the server description and clearing the + // pool. Not holding the lock leads to possible out-of-order processing of pool.clear() and + // pool.ready() calls from concurrent server description updates. 
+ s.processErrorLock.Lock() s.updateDescription(desc) - if desc.LastError != nil { + if err := desc.LastError; err != nil { // Clear the pool once the description has been updated to Unknown. Pass in a nil service ID to clear // because the monitoring routine only runs for non-load balanced deployments in which servers don't return // IDs. - s.pool.clear(nil) + s.pool.clear(err, nil) } + s.processErrorLock.Unlock() // If the server supports streaming or we're already streaming, we want to move to streaming the next response // without waiting. If the server has transitioned to Unknown from a network error, we want to do another @@ -629,6 +592,18 @@ func (s *Server) updateDescription(desc description.Server) { _ = recover() }() + // Anytime we update the server description to something other than "unknown", set the pool to + // "ready". Do this before updating the description so that connections can be checked out as + // soon as the server is selectable. If the pool is already ready, this operation is a no-op. + // Note that this behavior is roughly consistent with the current Go driver behavior (connects + // to all servers, even non-data-bearing nodes) but deviates slightly from CMAP spec, which + // specifies a more restricted set of server descriptions and topologies that should mark the + // pool ready. We don't have access to the topology here, so prefer the current Go driver + // behavior for simplicity. + if desc.Kind != description.Unknown { + _ = s.pool.ready() + } + // Use the updateTopologyCallback to update the parent Topology and get the description that should be stored. callback, ok := s.updateTopologyCallback.Load().(updateTopologyCallback) if ok && callback != nil { @@ -650,9 +625,8 @@ func (s *Server) updateDescription(desc description.Server) { // createConnection creates a new connection instance but does not call connect on it. The caller must call connect // before the connection can be used for network operations. -func (s *Server) createConnection() (*connection, error) { - opts := make([]ConnectionOption, len(s.cfg.connectionOpts)) - copy(opts, s.cfg.connectionOpts) +func (s *Server) createConnection() *connection { + opts := copyConnectionOpts(s.cfg.connectionOpts) opts = append(opts, WithConnectTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }), WithReadTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }), @@ -660,22 +634,24 @@ func (s *Server) createConnection() (*connection, error) { // We override whatever handshaker is currently attached to the options with a basic // one because need to make sure we don't do auth. WithHandshaker(func(h Handshaker) Handshaker { - return operation.NewIsMaster().AppName(s.cfg.appname).Compressors(s.cfg.compressionOpts). + return operation.NewHello().AppName(s.cfg.appname).Compressors(s.cfg.compressionOpts). ServerAPI(s.cfg.serverAPI) }), // Override any monitors specified in options with nil to avoid monitoring heartbeats. WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor { return nil }), - withPoolMonitor(func(*event.PoolMonitor) *event.PoolMonitor { return nil }), ) return newConnection(s.address, opts...) 
} +func copyConnectionOpts(opts []ConnectionOption) []ConnectionOption { + optsCopy := make([]ConnectionOption, len(opts)) + copy(optsCopy, opts) + return optsCopy +} + func (s *Server) setupHeartbeatConnection() error { - conn, err := s.createConnection() - if err != nil { - return err - } + conn := s.createConnection() // Take the lock when assigning the context and connection because they're accessed by cancelCheck. s.heartbeatLock.Lock() @@ -687,8 +663,7 @@ func (s *Server) setupHeartbeatConnection() error { s.conn = conn s.heartbeatLock.Unlock() - s.conn.connect(s.heartbeatCtx) - return s.conn.wait() + return s.conn.connect(s.heartbeatCtx) } // cancelCheck cancels in-progress connection dials and reads. It does not set any fields on the server. @@ -707,11 +682,11 @@ func (s *Server) cancelCheck() { return } - // If the connection exists, we need to wait for it to be connected conn.connect() and conn.close() cannot be called - // concurrently. We can ignore the error from conn.wait(). If the connection wasn't successfully opened, its state - // was set back to disconnected, so calling conn.close() will be a noop. + // If the connection exists, we need to wait for it to be connected because conn.connect() and + // conn.close() cannot be called concurrently. If the connection wasn't successfully opened, its + // state was set back to disconnected, so calling conn.close() will be a no-op. conn.closeConnectContext() - _ = conn.wait() + conn.wait() _ = conn.close() } @@ -719,9 +694,9 @@ func (s *Server) checkWasCancelled() bool { return s.heartbeatCtx.Err() != nil } -func (s *Server) createBaseOperation(conn driver.Connection) *operation.IsMaster { +func (s *Server) createBaseOperation(conn driver.Connection) *operation.Hello { return operation. - NewIsMaster(). + NewHello(). ClusterClock(s.cfg.clock). Deployment(driver.SingleConnectionDeployment{conn}). ServerAPI(s.cfg.serverAPI) @@ -739,9 +714,8 @@ func (s *Server) check() (description.Server, error) { err = s.setupHeartbeatConnection() if err == nil { // Use the description from the connection handshake as the value for this check. - s.rttMonitor.addSample(s.conn.isMasterRTT) + s.rttMonitor.addSample(s.conn.helloRTT) descPtr = &s.conn.desc - durationNanos = s.conn.isMasterRTT.Nanoseconds() } } @@ -762,7 +736,7 @@ func (s *Server) check() (description.Server, error) { err = baseOperation.StreamResponse(s.heartbeatCtx, heartbeatConn) case streamable: // The server supports the streamable protocol. Set the socket timeout to - // connectTimeoutMS+heartbeatFrequencyMS and execute an awaitable isMaster request. Set conn.canStream so + // connectTimeoutMS+heartbeatFrequencyMS and execute an awaitable hello request. Set conn.canStream so // the wire message will advertise streaming support to the server. // Calculation for maxAwaitTimeMS is taken from time.Duration.Milliseconds (added in Go 1.13). @@ -840,17 +814,27 @@ func extractTopologyVersion(err error) *description.TopologyVersion { return nil } +// MinRTT returns the minimum round-trip time to the server observed over the last 5 minutes. +func (s *Server) MinRTT() time.Duration { + return s.rttMonitor.getMinRTT() +} + +// OperationCount returns the current number of in-progress operations for this server. +func (s *Server) OperationCount() int64 { + return atomic.LoadInt64(&s.operationCount) +} + // String implements the Stringer interface. 
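The new OperationCount accessor above reads an atomically maintained in-progress counter, which the server-selection change later in this diff uses as its load signal. A minimal sketch of how such a counter is typically maintained (trackedServer and run are invented names, not the driver's mechanism):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type trackedServer struct {
	operationCount int64 // loaded by an OperationCount()-style accessor
}

// run brackets one operation: increment on entry, decrement on exit, so the
// counter always reflects operations currently in flight.
func (s *trackedServer) run(op func()) {
	atomic.AddInt64(&s.operationCount, 1)
	defer atomic.AddInt64(&s.operationCount, -1)
	op()
}

func main() {
	var s trackedServer
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.run(func() { /* the operation itself */ })
		}()
	}
	wg.Wait()
	fmt.Println("in progress now:", atomic.LoadInt64(&s.operationCount)) // 0 once all finish
}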
func (s *Server) String() string { desc := s.Description() - connState := atomic.LoadInt32(&s.connectionstate) + state := atomic.LoadInt64(&s.state) str := fmt.Sprintf("Addr: %s, Type: %s, State: %s", - s.address, desc.Kind, connectionStateString(connState)) + s.address, desc.Kind, serverStateString(state)) if len(desc.Tags) != 0 { str += fmt.Sprintf(", Tag sets: %s", desc.Tags) } - if connState == connected { - str += fmt.Sprintf(", Average RTT: %d", desc.AverageRTT) + if state == serverConnected { + str += fmt.Sprintf(", Average RTT: %s, Min RTT: %s", desc.AverageRTT, s.MinRTT()) } if desc.LastError != nil { str += fmt.Sprintf(", Last error: %s", desc.LastError) @@ -889,17 +873,21 @@ func (ss *ServerSubscription) Unsubscribe() error { // publishes a ServerOpeningEvent to indicate the server is being initialized func (s *Server) publishServerOpeningEvent(addr address.Address) { + if s == nil { + return + } + serverOpening := &event.ServerOpeningEvent{ Address: addr, TopologyID: s.topologyID, } - if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerOpening != nil { + if s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerOpening != nil { s.cfg.serverMonitor.ServerOpening(serverOpening) } } -// publishes a ServerHeartbeatStartedEvent to indicate an ismaster command has started +// publishes a ServerHeartbeatStartedEvent to indicate a hello command has started func (s *Server) publishServerHeartbeatStartedEvent(connectionID string, await bool) { serverHeartbeatStarted := &event.ServerHeartbeatStartedEvent{ ConnectionID: connectionID, @@ -911,7 +899,7 @@ func (s *Server) publishServerHeartbeatStartedEvent(connectionID string, await b } } -// publishes a ServerHeartbeatSucceededEvent to indicate ismaster has succeeded +// publishes a ServerHeartbeatSucceededEvent to indicate hello has succeeded func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, durationNanos int64, desc description.Server, @@ -928,7 +916,7 @@ func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, } } -// publishes a ServerHeartbeatFailedEvent to indicate ismaster has failed +// publishes a ServerHeartbeatFailedEvent to indicate hello has failed func (s *Server) publishServerHeartbeatFailedEvent(connectionID string, durationNanos int64, err error, diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go index 1b82f192a..a4b7dcc20 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go @@ -19,24 +19,28 @@ import ( var defaultRegistry = bson.NewRegistryBuilder().Build() type serverConfig struct { - clock *session.ClusterClock - compressionOpts []string - connectionOpts []ConnectionOption - appname string - heartbeatInterval time.Duration - heartbeatTimeout time.Duration - maxConns uint64 - minConns uint64 - poolMonitor *event.PoolMonitor - serverMonitor *event.ServerMonitor - connectionPoolMaxIdleTime time.Duration - registry *bsoncodec.Registry - monitoringDisabled bool - serverAPI *driver.ServerAPIOptions - loadBalanced bool -} - -func newServerConfig(opts ...ServerOption) (*serverConfig, error) { + clock *session.ClusterClock + compressionOpts []string + connectionOpts []ConnectionOption + appname string + heartbeatInterval time.Duration + heartbeatTimeout time.Duration + serverMonitor *event.ServerMonitor + registry *bsoncodec.Registry 
+ monitoringDisabled bool + serverAPI *driver.ServerAPIOptions + loadBalanced bool + + // Connection pool options. + maxConns uint64 + minConns uint64 + maxConnecting uint64 + poolMonitor *event.PoolMonitor + poolMaxIdleTime time.Duration + poolMaintainInterval time.Duration +} + +func newServerConfig(opts ...ServerOption) *serverConfig { cfg := &serverConfig{ heartbeatInterval: 10 * time.Second, heartbeatTimeout: 10 * time.Second, @@ -45,72 +49,65 @@ func newServerConfig(opts ...ServerOption) (*serverConfig, error) { } for _, opt := range opts { - err := opt(cfg) - if err != nil { - return nil, err + if opt == nil { + continue } + opt(cfg) } - return cfg, nil + return cfg } // ServerOption configures a server. -type ServerOption func(*serverConfig) error +type ServerOption func(*serverConfig) func withMonitoringDisabled(fn func(bool) bool) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.monitoringDisabled = fn(cfg.monitoringDisabled) - return nil } } // WithConnectionOptions configures the server's connections. func WithConnectionOptions(fn func(...ConnectionOption) []ConnectionOption) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.connectionOpts = fn(cfg.connectionOpts...) - return nil } } // WithCompressionOptions configures the server's compressors. func WithCompressionOptions(fn func(...string) []string) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.compressionOpts = fn(cfg.compressionOpts...) - return nil } } // WithServerAppName configures the server's application name. func WithServerAppName(fn func(string) string) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.appname = fn(cfg.appname) - return nil } } // WithHeartbeatInterval configures a server's heartbeat interval. func WithHeartbeatInterval(fn func(time.Duration) time.Duration) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.heartbeatInterval = fn(cfg.heartbeatInterval) - return nil } } // WithHeartbeatTimeout configures how long to wait for a heartbeat socket to // connection. func WithHeartbeatTimeout(fn func(time.Duration) time.Duration) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.heartbeatTimeout = fn(cfg.heartbeatTimeout) - return nil } } // WithMaxConnections configures the maximum number of connections to allow for -// a given server. If max is 0, then the default will be math.MaxInt64. +// a given server. If max is 0, then maximum connection pool size is not limited. func WithMaxConnections(fn func(uint64) uint64) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.maxConns = fn(cfg.maxConns) - return nil } } @@ -118,9 +115,17 @@ func WithMaxConnections(fn func(uint64) uint64) ServerOption { // a given server. If min is 0, then there is no lower limit to the number of // connections. func WithMinConnections(fn func(uint64) uint64) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.minConns = fn(cfg.minConns) - return nil + } +} + +// WithMaxConnecting configures the maximum number of connections a connection +// pool may establish simultaneously. If maxConnecting is 0, the default value +// of 2 is used. 
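As context for the option refactor above: ServerOption is now a plain mutator applied by an infallible newServerConfig that skips nil entries. A minimal sketch of the same pattern; it uses value-taking options for brevity where the real constructors accept transformer functions, and the types here are illustrative rather than the package's:

package main

import (
	"fmt"
	"time"
)

// config and Option mirror the shape of serverConfig/ServerOption above.
type config struct {
	heartbeatInterval time.Duration
	maxConns          uint64
	maxConnecting     uint64
}

type Option func(*config)

func WithMaxConns(n uint64) Option      { return func(c *config) { c.maxConns = n } }
func WithMaxConnecting(n uint64) Option { return func(c *config) { c.maxConnecting = n } }

// newConfig applies defaults first, then every non-nil option, and cannot
// fail, matching the new newServerConfig signature.
func newConfig(opts ...Option) *config {
	cfg := &config{heartbeatInterval: 10 * time.Second, maxConnecting: 2}
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		opt(cfg)
	}
	return cfg
}

func main() {
	cfg := newConfig(WithMaxConns(100), nil, WithMaxConnecting(4))
	fmt.Printf("%+v\n", *cfg)
}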
+func WithMaxConnecting(fn func(uint64) uint64) ServerOption { + return func(cfg *serverConfig) { + cfg.maxConnecting = fn(cfg.maxConnecting) } } @@ -128,57 +133,58 @@ func WithMinConnections(fn func(uint64) uint64) ServerOption { // before being removed. If connectionPoolMaxIdleTime is 0, then no idle time is set and connections will not be removed // because of their age func WithConnectionPoolMaxIdleTime(fn func(time.Duration) time.Duration) ServerOption { - return func(cfg *serverConfig) error { - cfg.connectionPoolMaxIdleTime = fn(cfg.connectionPoolMaxIdleTime) - return nil + return func(cfg *serverConfig) { + cfg.poolMaxIdleTime = fn(cfg.poolMaxIdleTime) + } +} + +// WithConnectionPoolMaintainInterval configures the interval that the background connection pool +// maintenance goroutine runs. +func WithConnectionPoolMaintainInterval(fn func(time.Duration) time.Duration) ServerOption { + return func(cfg *serverConfig) { + cfg.poolMaintainInterval = fn(cfg.poolMaintainInterval) } } // WithConnectionPoolMonitor configures the monitor for all connection pool actions func WithConnectionPoolMonitor(fn func(*event.PoolMonitor) *event.PoolMonitor) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.poolMonitor = fn(cfg.poolMonitor) - return nil } } // WithServerMonitor configures the monitor for all SDAM events for a server func WithServerMonitor(fn func(*event.ServerMonitor) *event.ServerMonitor) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.serverMonitor = fn(cfg.serverMonitor) - return nil } } // WithClock configures the ClusterClock for the server to use. func WithClock(fn func(clock *session.ClusterClock) *session.ClusterClock) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.clock = fn(cfg.clock) - return nil } } // WithRegistry configures the registry for the server to use when creating // cursors. func WithRegistry(fn func(*bsoncodec.Registry) *bsoncodec.Registry) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.registry = fn(cfg.registry) - return nil } } // WithServerAPI configures the server API options for the server to use. func WithServerAPI(fn func(serverAPI *driver.ServerAPIOptions) *driver.ServerAPIOptions) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.serverAPI = fn(cfg.serverAPI) - return nil } } // WithServerLoadBalanced specifies whether or not the server is behind a load balancer. func WithServerLoadBalanced(fn func(bool) bool) ServerOption { - return func(cfg *serverConfig) error { + return func(cfg *serverConfig) { cfg.loadBalanced = fn(cfg.loadBalanced) - return nil } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go new file mode 100644 index 000000000..387f2ec04 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go @@ -0,0 +1,58 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +//go:build !go1.17 +// +build !go1.17 + +package topology + +import ( + "context" + "crypto/tls" + "net" +) + +type tlsConn interface { + net.Conn + + // Only require Handshake on the interface for Go 1.16 and less. + Handshake() error + ConnectionState() tls.ConnectionState +} + +var _ tlsConn = (*tls.Conn)(nil) + +type tlsConnectionSource interface { + Client(net.Conn, *tls.Config) tlsConn +} + +type tlsConnectionSourceFn func(net.Conn, *tls.Config) tlsConn + +var _ tlsConnectionSource = (tlsConnectionSourceFn)(nil) + +func (t tlsConnectionSourceFn) Client(nc net.Conn, cfg *tls.Config) tlsConn { + return t(nc, cfg) +} + +var defaultTLSConnectionSource tlsConnectionSourceFn = func(nc net.Conn, cfg *tls.Config) tlsConn { + return tls.Client(nc, cfg) +} + +// clientHandshake will perform a handshake with a goroutine and wait for its completion on Go 1.16 and less +// when HandshakeContext is not available. +func clientHandshake(ctx context.Context, client tlsConn) error { + errChan := make(chan error, 1) + go func() { + errChan <- client.Handshake() + }() + + select { + case err := <-errChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go similarity index 66% rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source.go rename to vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go index 718a9abbd..c9822e060 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go @@ -1,19 +1,25 @@ -// Copyright (C) MongoDB, Inc. 2017-present. +// Copyright (C) MongoDB, Inc. 2022-present. // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +//go:build go1.17 +// +build go1.17 + package topology import ( + "context" "crypto/tls" "net" ) type tlsConn interface { net.Conn - Handshake() error + + // Require HandshakeContext on the interface for Go 1.17 and higher. + HandshakeContext(ctx context.Context) error ConnectionState() tls.ConnectionState } @@ -34,3 +40,8 @@ func (t tlsConnectionSourceFn) Client(nc net.Conn, cfg *tls.Config) tlsConn { var defaultTLSConnectionSource tlsConnectionSourceFn = func(nc net.Conn, cfg *tls.Config) tlsConn { return tls.Client(nc, cfg) } + +// clientHandshake will perform a handshake on Go 1.17 and higher with HandshakeContext. 
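Both build-tagged files expose the same clientHandshake helper: the Go 1.16 variant above races Handshake against ctx.Done() in a goroutine, while the Go 1.17 variant that follows simply calls HandshakeContext. A hedged usage sketch of the modern path against the standard library (the endpoint is a placeholder; requires Go 1.17+):

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	raw, err := net.Dial("tcp", "example.com:443") // placeholder endpoint
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer raw.Close()

	client := tls.Client(raw, &tls.Config{ServerName: "example.com"})
	// On Go 1.17+ this is what the driver's clientHandshake reduces to; the
	// pre-1.17 variant wraps Handshake() in a goroutine to honor ctx instead.
	if err := client.HandshakeContext(ctx); err != nil {
		fmt.Println("handshake:", err)
		return
	}
	fmt.Println("negotiated TLS version:", client.ConnectionState().Version)
}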
+func clientHandshake(ctx context.Context, client tlsConn) error { + return client.HandshakeContext(ctx) +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index ddd24cd98..97ec5e527 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -13,22 +13,30 @@ package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology" import ( "context" "errors" + "fmt" "math/rand" "strings" "sync" "sync/atomic" "time" - "fmt" - "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/randutil" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/dns" ) +// Topology state constants. +const ( + topologyDisconnected int64 = iota + topologyDisconnecting + topologyConnected + topologyConnecting +) + // ErrSubscribeAfterClosed is returned when a user attempts to subscribe to a // closed Server or Topology. var ErrSubscribeAfterClosed = errors.New("cannot subscribe after closeConnection") @@ -48,6 +56,9 @@ var ErrServerSelectionTimeout = errors.New("server selection timeout") // MonitorMode represents the way in which a server is monitored. type MonitorMode uint8 +// random is a package-global pseudo-random number generator. +var random = randutil.NewLockedRand(rand.NewSource(randutil.CryptoSeed())) + // These constants are the available monitoring modes. const ( AutomaticMode MonitorMode = iota @@ -56,7 +67,7 @@ const ( // Topology represents a MongoDB deployment. type Topology struct { - connectionstate int32 + state int64 cfg *config @@ -144,7 +155,7 @@ func New(opts ...Option) (*Topology, error) { // Connect initializes a Topology and starts the monitoring process. This function // must be called to properly monitor the topology. func (t *Topology) Connect() error { - if !atomic.CompareAndSwapInt32(&t.connectionstate, disconnected, connecting) { + if !atomic.CompareAndSwapInt64(&t.state, topologyDisconnected, topologyConnecting) { return ErrTopologyConnected } @@ -226,14 +237,14 @@ func (t *Topology) Connect() error { t.subscriptionsClosed = false // explicitly set in case topology was disconnected and then reconnected - atomic.StoreInt32(&t.connectionstate, connected) + atomic.StoreInt64(&t.state, topologyConnected) return nil } // Disconnect closes the topology. It stops the monitoring thread and // closes all open subscriptions. func (t *Topology) Disconnect(ctx context.Context) error { - if !atomic.CompareAndSwapInt32(&t.connectionstate, connected, disconnecting) { + if !atomic.CompareAndSwapInt64(&t.state, topologyConnected, topologyDisconnecting) { return ErrTopologyClosed } @@ -265,7 +276,7 @@ func (t *Topology) Disconnect(ctx context.Context) error { t.desc.Store(description.Topology{}) - atomic.StoreInt32(&t.connectionstate, disconnected) + atomic.StoreInt64(&t.state, topologyDisconnected) t.publishTopologyClosedEvent() return nil } @@ -287,7 +298,7 @@ func (t *Topology) Kind() description.TopologyKind { return t.Description().Kind // and will be pre-populated with the current description.Topology. // Subscribe implements the driver.Subscriber interface. 
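The int64 state constants and CompareAndSwap guards introduced above implement a small connect/disconnect state machine. A self-contained sketch of that guard (the topology type and error text here are invented):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

const (
	disconnected int64 = iota
	disconnecting
	connected
	connecting
)

type topology struct{ state int64 }

// connect only succeeds from the disconnected state, exactly like the
// CompareAndSwapInt64 guard in Topology.Connect above.
func (t *topology) connect() error {
	if !atomic.CompareAndSwapInt64(&t.state, disconnected, connecting) {
		return errors.New("already connected")
	}
	// ... start monitoring ...
	atomic.StoreInt64(&t.state, connected)
	return nil
}

func main() {
	var t topology
	fmt.Println(t.connect()) // <nil>
	fmt.Println(t.connect()) // already connected
}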
func (t *Topology) Subscribe() (*driver.Subscription, error) { - if atomic.LoadInt32(&t.connectionstate) != connected { + if atomic.LoadInt64(&t.state) != topologyConnected { return nil, errors.New("cannot subscribe to Topology that is not connected") } ch := make(chan description.Topology, 1) @@ -335,7 +346,7 @@ func (t *Topology) Unsubscribe(sub *driver.Subscription) error { // RequestImmediateCheck will send heartbeats to all the servers in the // topology right away, instead of waiting for the heartbeat timeout. func (t *Topology) RequestImmediateCheck() { - if atomic.LoadInt32(&t.connectionstate) != connected { + if atomic.LoadInt64(&t.state) != topologyConnected { return } t.serversLock.Lock() @@ -349,7 +360,7 @@ func (t *Topology) RequestImmediateCheck() { // server selection spec, and will time out after severSelectionTimeout or when the // parent context is done. func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelector) (driver.Server, error) { - if atomic.LoadInt32(&t.connectionstate) != connected { + if atomic.LoadInt64(&t.state) != topologyConnected { return nil, ErrTopologyClosed } var ssTimeoutCh <-chan time.Time @@ -395,26 +406,77 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect continue } - selected := suitable[rand.Intn(len(suitable))] - selectedS, err := t.FindServer(selected) - switch { - case err != nil: + // If there's only one suitable server description, try to find the associated server and + // return it. This is an optimization primarily for standalone and load-balanced deployments. + if len(suitable) == 1 { + server, err := t.FindServer(suitable[0]) + if err != nil { + return nil, err + } + if server == nil { + continue + } + return server, nil + } + + // Randomly select 2 suitable server descriptions and find servers for them. We select two + // so we can pick the one with the one with fewer in-progress operations below. + desc1, desc2 := pick2(suitable) + server1, err := t.FindServer(desc1) + if err != nil { return nil, err - case selectedS != nil: - return selectedS, nil - default: - // We don't have an actual server for the provided description. - // This could happen for a number of reasons, including that the - // server has since stopped being a part of this topology, or that - // the server selector returned no suitable servers. } + server2, err := t.FindServer(desc2) + if err != nil { + return nil, err + } + + // If we don't have an actual server for one or both of the provided descriptions, either + // return the one server we have, or try again if they're both nil. This could happen for a + // number of reasons, including that the server has since stopped being a part of this + // topology. + if server1 == nil || server2 == nil { + if server1 == nil && server2 == nil { + continue + } + if server1 != nil { + return server1, nil + } + return server2, nil + } + + // Of the two randomly selected suitable servers, pick the one with fewer in-use connections. + // We use in-use connections as an analog for in-progress operations because they are almost + // always the same value for a given server. + if server1.OperationCount() < server2.OperationCount() { + return server1, nil + } + return server2, nil } } +// pick2 returns 2 random server descriptions from the input slice of server descriptions, +// guaranteeing that the same element from the slice is not picked twice. The order of server +// descriptions in the input slice may be modified. 
If fewer than 2 server descriptions are +// provided, pick2 will panic. +func pick2(ds []description.Server) (description.Server, description.Server) { + // Select a random index from the input slice and keep the server description from that index. + idx := random.Intn(len(ds)) + s1 := ds[idx] + + // Swap the selected index to the end and reslice to remove it so we don't pick the same server + // description twice. + ds[idx], ds[len(ds)-1] = ds[len(ds)-1], ds[idx] + ds = ds[:len(ds)-1] + + // Select another random index from the input slice and return both selected server descriptions. + return s1, ds[random.Intn(len(ds))] +} + // FindServer will attempt to find a server that fits the given server description. // This method will return nil, nil if a matching server could not be found. func (t *Topology) FindServer(selected description.Server) (*SelectedServer, error) { - if atomic.LoadInt32(&t.connectionstate) != connected { + if atomic.LoadInt64(&t.state) != topologyConnected { return nil, ErrTopologyClosed } t.serversLock.Lock() @@ -494,7 +556,7 @@ func (t *Topology) selectServerFromDescription(desc description.Topology, func (t *Topology) pollSRVRecords() { defer t.pollingwg.Done() - serverConfig, _ := newServerConfig(t.cfg.serverOpts...) + serverConfig := newServerConfig(t.cfg.serverOpts...) heartbeatInterval := serverConfig.heartbeatInterval pollTicker := time.NewTicker(t.rescanSRVInterval) @@ -527,7 +589,7 @@ func (t *Topology) pollSRVRecords() { break } - parsedHosts, err := t.dnsResolver.ParseHosts(hosts, false) + parsedHosts, err := t.dnsResolver.ParseHosts(hosts, t.cfg.srvServiceName, false) // DNS problem or no verified hosts returned if err != nil || len(parsedHosts) == 0 { if !t.pollHeartbeatTime.Load().(bool) { @@ -581,11 +643,25 @@ func (t *Topology) processSRVResults(parsedHosts []string) bool { t.fsm.removeServerByAddr(addr) t.publishServerClosedEvent(s.address) } + + // Now that we've removed all the hosts that disappeared from the SRV record, we need to add any + // new hosts added to the SRV record. If adding all of the new hosts would increase the number + // of servers past srvMaxHosts, shuffle the list of added hosts. + if t.cfg.srvMaxHosts > 0 && len(t.servers)+len(diff.Added) > t.cfg.srvMaxHosts { + random.Shuffle(len(diff.Added), func(i, j int) { + diff.Added[i], diff.Added[j] = diff.Added[j], diff.Added[i] + }) + } + // Add all added hosts until the number of servers reaches srvMaxHosts. 
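The SelectServer rewrite above is the "power of two random choices" heuristic: pick two suitable servers at random and keep the one with the lower in-progress operation count. A runnable sketch of the same decision rule with invented types (server and opCount stand in for Server and OperationCount):

package main

import (
	"fmt"
	"math/rand"
)

type server struct {
	addr    string
	opCount int64 // stand-in for Server.OperationCount()
}

// pick2 mirrors the helper above: choose two distinct random elements,
// swapping the first pick to the end so it cannot be chosen twice.
func pick2(ss []server) (server, server) {
	idx := rand.Intn(len(ss))
	first := ss[idx]
	ss[idx], ss[len(ss)-1] = ss[len(ss)-1], ss[idx]
	rest := ss[:len(ss)-1]
	return first, rest[rand.Intn(len(rest))]
}

func selectServer(suitable []server) server {
	if len(suitable) == 1 {
		return suitable[0]
	}
	a, b := pick2(suitable)
	if a.opCount < b.opCount {
		return a
	}
	return b
}

func main() {
	suitable := []server{{"a:27017", 12}, {"b:27017", 3}, {"c:27017", 40}}
	fmt.Println("selected:", selectServer(suitable).addr)
}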
for _, a := range diff.Added { + if t.cfg.srvMaxHosts > 0 && len(t.servers) >= t.cfg.srvMaxHosts { + break + } addr := address.Address(a).Canonicalize() _ = t.addServer(addr) t.fsm.addServer(addr) } + //store new description newDesc := description.Topology{ Kind: t.fsm.Kind, @@ -610,7 +686,6 @@ func (t *Topology) processSRVResults(parsedHosts []string) bool { t.subLock.Unlock() return true - } // apply updates the Topology and its underlying FSM based on the provided server description and returns the server @@ -631,11 +706,7 @@ func (t *Topology) apply(ctx context.Context, desc description.Server) descripti } var current description.Topology - var err error - current, desc, err = t.fsm.apply(desc) - if err != nil { - return desc - } + current, desc = t.fsm.apply(desc) if !oldDesc.Equal(desc) { t.publishServerDescriptionChangedEvent(oldDesc, desc) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go index fa74764e6..b405739cb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go @@ -36,6 +36,8 @@ type config struct { uri string serverSelectionTimeout time.Duration serverMonitor *event.ServerMonitor + srvMaxHosts int + srvServiceName string loadBalanced bool } @@ -46,6 +48,9 @@ func newConfig(opts ...Option) (*config, error) { } for _, opt := range opts { + if opt == nil { + continue + } err := opt(cfg) if err != nil { return nil, err @@ -188,6 +193,7 @@ func WithConnString(fn func(connstring.ConnString) connstring.ConnString) Option AppName: cs.AppName, Authenticator: authenticator, Compressors: cs.Compressors, + LoadBalanced: cs.LoadBalancedSet && cs.LoadBalanced, } if cs.AuthMechanism == "" { // Required for SASL mechanism negotiation during handshake @@ -198,7 +204,10 @@ func WithConnString(fn func(connstring.ConnString) connstring.ConnString) Option } else { // We need to add a non-auth Handshaker to the connection options connOpts = append(connOpts, WithHandshaker(func(h driver.Handshaker) driver.Handshaker { - return operation.NewIsMaster().AppName(cs.AppName).Compressors(cs.Compressors) + return operation.NewHello(). + AppName(cs.AppName). + Compressors(cs.Compressors). + LoadBalanced(cs.LoadBalancedSet && cs.LoadBalanced) })) } @@ -304,6 +313,30 @@ func WithURI(fn func(string) string) Option { } } +// WithLoadBalanced specifies whether or not the cluster is behind a load balancer. +func WithLoadBalanced(fn func(bool) bool) Option { + return func(cfg *config) error { + cfg.loadBalanced = fn(cfg.loadBalanced) + return nil + } +} + +// WithSRVMaxHosts specifies the SRV host limit that was used to create the topology. +func WithSRVMaxHosts(fn func(int) int) Option { + return func(cfg *config) error { + cfg.srvMaxHosts = fn(cfg.srvMaxHosts) + return nil + } +} + +// WithSRVServiceName specifies the SRV service name that was used to create the topology. +func WithSRVServiceName(fn func(string) string) Option { + return func(cfg *config) error { + cfg.srvServiceName = fn(cfg.srvServiceName) + return nil + } +} + // addCACertFromFile adds a root CA certificate to the configuration given a path // to the containing file. 
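For the SRV handling above: when admitting every newly discovered host would exceed srvMaxHosts, the additions are shuffled first so the subset that fits under the cap is chosen without bias. A small sketch of that cap-with-shuffle step (host names and the limit are made up):

package main

import (
	"fmt"
	"math/rand"
)

func capHosts(existing, added []string, srvMaxHosts int) []string {
	// Shuffle only when admitting everything would exceed the cap.
	if srvMaxHosts > 0 && len(existing)+len(added) > srvMaxHosts {
		rand.Shuffle(len(added), func(i, j int) { added[i], added[j] = added[j], added[i] })
	}
	for _, h := range added {
		if srvMaxHosts > 0 && len(existing) >= srvMaxHosts {
			break
		}
		existing = append(existing, h)
	}
	return existing
}

func main() {
	hosts := capHosts(
		[]string{"a.db:27017"},
		[]string{"b.db:27017", "c.db:27017", "d.db:27017"},
		3,
	)
	fmt.Println(hosts) // always 3 hosts; which two additions survive is random
}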
func addCACertFromFile(cfg *tls.Config, file string) error { @@ -335,7 +368,7 @@ func loadCert(data []byte) ([]byte, error) { var certBlock *pem.Block for certBlock == nil { - if data == nil || len(data) == 0 { + if len(data) == 0 { return nil, errors.New(".pem file must have both a CERTIFICATE and an RSA PRIVATE KEY section") } @@ -420,13 +453,5 @@ func addClientCertFromFile(cfg *tls.Config, clientFile, keyPasswd string) (strin return "", err } - return x509CertSubject(crt), nil -} - -// WithLoadBalanced specifies whether or not the cluster is behind a load balancer. -func WithLoadBalanced(fn func(bool) bool) Option { - return func(cfg *config) error { - cfg.loadBalanced = fn(cfg.loadBalanced) - return nil - } + return crt.Subject.String(), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_10.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_10.go deleted file mode 100644 index 04c4b8976..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_10.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.10 - -package topology - -import "crypto/x509" - -func x509CertSubject(cert *x509.Certificate) string { - return cert.Subject.String() -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_9.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_9.go deleted file mode 100644 index b3ad273f5..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options_1_9.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.10 - -package topology - -import ( - "crypto/x509" -) - -// We don't support version less then 1.10, but Evergreen needs to be able to compile the driver -// using version 1.8. -func x509CertSubject(cert *x509.Certificate) string { - return "" -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go index 83a67850e..097838731 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go @@ -7,21 +7,26 @@ package uuid // import "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" import ( - "bytes" - "crypto/rand" "io" + "math/rand" + + "go.mongodb.org/mongo-driver/internal/randutil" ) // UUID represents a UUID. type UUID [16]byte -var rander = rand.Reader +// A source is a UUID generator that reads random values from a randutil.LockedRand. +// It is safe to use from multiple goroutines. +type source struct { + random *randutil.LockedRand +} -// New generates a new uuid. -func New() (UUID, error) { +// new returns a random UUIDv4 with bytes read from the source's random number generator. +func (s *source) new() (UUID, error) { var uuid [16]byte - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(s.random, uuid[:]) if err != nil { return [16]byte{}, err } @@ -31,7 +36,27 @@ func New() (UUID, error) { return uuid, nil } +// newGlobalSource returns a source that uses a "math/rand" pseudo-random number generator seeded +// with a cryptographically-secure random number. It is intended to be used to initialize the +// package-global UUID generator. +func newGlobalSource() *source { + return &source{ + random: randutil.NewLockedRand(rand.NewSource(randutil.CryptoSeed())), + } +} + +// globalSource is a package-global pseudo-random UUID generator. +var globalSource = newGlobalSource() + +// New returns a random UUIDv4. 
It uses a "math/rand" pseudo-random number generator seeded with a +// cryptographically-secure random number at package initialization. +// +// New should not be used to generate cryptographically-secure random UUIDs. +func New() (UUID, error) { + return globalSource.new() +} + // Equal returns true if two UUIDs are equal. func Equal(a, b UUID) bool { - return bytes.Equal([]byte(a[:]), []byte(b[:])) + return a == b } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index f00e47b9a..47f086e88 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -78,7 +78,7 @@ type QueryFlag int32 const ( _ QueryFlag = 1 << iota TailableCursor - SlaveOK + SecondaryOK OplogReplay NoCursorTimeout AwaitData @@ -92,8 +92,8 @@ func (qf QueryFlag) String() string { if qf&TailableCursor == TailableCursor { strs = append(strs, "TailableCursor") } - if qf&SlaveOK == SlaveOK { - strs = append(strs, "SlaveOK") + if qf&SecondaryOK == SecondaryOK { + strs = append(strs, "SecondaryOK") } if qf&OplogReplay == OplogReplay { strs = append(strs, "OplogReplay") @@ -391,7 +391,7 @@ func ReadMsgSectionRawDocumentSequence(src []byte) (identifier string, data []by return "", nil, rem, false } - // After these assignments, rem will be the data containing the identifer string + the document sequence bytes and + // After these assignments, rem will be the data containing the identifier string + the document sequence bytes and // rest will be the rest of the wire message after this document sequence. rem, rest := rem[:length-4], rem[length-4:] diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index c5898db46..4652247b8 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -15,6 +15,7 @@ const bufSize = 256 // xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only // be called when the vector facility is available. Implementation in asm_s390x.s. +// //go:noescape func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go index 44dc8e8ca..edcf163c4 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -1,13 +1,16 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego package field // feMul sets out = a * b. It works like feMulGeneric. +// //go:noescape func feMul(out *Element, a *Element, b *Element) // feSquare sets out = a * a. It works like feSquareGeneric. +// //go:noescape func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go index 71ad917da..a7828345f 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -1,13 +1,7 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// In Go 1.13, the ed25519 package was promoted to the standard library as -// crypto/ed25519, and this package became a wrapper for the standard library one. -// -//go:build !go1.13 -// +build !go1.13 - // Package ed25519 implements the Ed25519 signature algorithm. See // https://ed25519.cr.yp.to/. // @@ -16,21 +10,15 @@ // representation includes a public key suffix to make multiple signing // operations with the same key more efficient. This package refers to the RFC // 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. package ed25519 -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" + "crypto/ed25519" "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" ) const ( @@ -45,57 +33,21 @@ const ( ) // PublicKey is the type of Ed25519 public keys. -type PublicKey []byte +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey // PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey // GenerateKey generates a public/private key pair using entropy from rand. // If rand is nil, crypto/rand.Reader will be used. func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil + return ed25519.GenerateKey(rand) } // NewKeyFromSeed calculates a private key from a seed. It will panic if @@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { // with RFC 8032. RFC 8032's private keys correspond to seeds in this // package. 
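Since the package is now a thin alias over crypto/ed25519, callers can treat its keys and signatures as interchangeable with the standard library's. A quick usage sketch against the standard-library API this wrapper delegates to:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("heartbeat")
	sig := ed25519.Sign(priv, msg)
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig)) // true
}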
func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey + return ed25519.NewKeyFromSeed(seed) } // Sign signs the message with privateKey and returns a signature. It will // panic if len(privateKey) is not PrivateKeySize. func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature + return ed25519.Sign(privateKey, message) } // Verify reports whether sig is a valid signature of message by publicKey. It // will panic if len(publicKey) is not PublicKeySize. func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. 
- if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) + return ed25519.Verify(publicKey, message, sig) } diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go deleted file mode 100644 index b5974dc8b..000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. 
-func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c1..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 
13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, 
-4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - 
FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, 
-8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 
5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 
25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - 
FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, 
-21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, 
-9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - 
FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 
22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 
26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 
16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - 
FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, 
-27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 
1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 
18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, 
-5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - 
FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 
8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, 
-13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - 
FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 
12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, 
-11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - 
FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, 
-4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 
334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252a..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import "encoding/binary" - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. 
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
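The *19 precomputations described in the notes above rest on the identity 2^255 ≡ 19 (mod 2^255-19): any partial product that spills past bit 255 can be folded back into the low limbs after scaling by 19. A minimal standalone sketch of that identity using math/big (illustrative only, not part of this package):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^255 - 19, the field modulus used throughout this file.
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

	// 2^255 mod p is exactly 19, so a term c*2^255 reduces to 19*c.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)) // 19

	// Example: 7*2^260 = (7*2^5)*2^255 = 19*7*32 (mod p).
	lhs := new(big.Int).Mod(new(big.Int).Mul(big.NewInt(7), new(big.Int).Lsh(one, 260)), p)
	rhs := new(big.Int).Mod(big.NewInt(19*7*32), p)
	fmt.Println(lhs.Cmp(rhs)) // 0, i.e. equal
}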
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
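The alternating 1.1*2^25 / 1.1*2^24 bounds in these pre- and postconditions reflect the radix-2^25.5 layout described for FieldElement earlier in this file: limb widths alternate between 26 and 25 bits, starting at bit positions 0, 26, 51, ..., 230 and covering 255 bits in total. A tiny standalone check of that layout (illustrative only):

package main

import "fmt"

func main() {
	// Limb widths of the FieldElement representation: 26 and 25 bits, alternating.
	widths := []uint{26, 25, 26, 25, 26, 25, 26, 25, 26, 25}
	pos := uint(0)
	for i, w := range widths {
		fmt.Printf("t[%d] starts at bit %d, width %d\n", i, pos, w)
		pos += w
	}
	fmt.Println("total bits:", pos) // 255, matching GF(2^255-19)
}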
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
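The constant d named in this comment can be evaluated directly with math/big by reducing -121665/121666 modulo 2^255-19; a standalone sketch (illustrative only, independent of the removed constant tables):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	// d = -121665/121666 mod p, i.e. -121665 times the modular inverse of 121666.
	d := new(big.Int).ModInverse(big.NewInt(121666), p)
	d.Mul(d, big.NewInt(-121665))
	d.Mod(d, p)
	fmt.Println(d)
}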
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
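equal above and negative, whose body continues below, are deliberately branch-free: selectPoint and PreComputedGroupElementCMove use them to index the precomputed tables without data-dependent branches. A standalone copy with a small driver (illustrative only):

package main

import "fmt"

// Standalone copies of the two helpers, for illustration only.
func equal(b, c int32) int32 {
	x := uint32(b ^ c) // zero exactly when b == c
	x--                // wraps to 0xffffffff only if x was zero
	return int32(x >> 31)
}

func negative(b int32) int32 {
	return (b >> 31) & 1 // arithmetic shift exposes the sign bit
}

func main() {
	fmt.Println(equal(3, 3), equal(3, 4))  // 1 0
	fmt.Println(negative(-7), negative(7)) // 1 0
}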
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
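The specification above pins ScMulAdd down as (a*b + c) mod l on 32-byte little-endian scalars. A variable-time math/big rendering of the same computation, usable only as a cross-check against the constant-time code below (standalone sketch; the helper name is illustrative):

package main

import (
	"fmt"
	"math/big"
)

// leToInt reads a little-endian byte slice as a non-negative integer.
func leToInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i, v := range b {
		be[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493, as stated above.
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l := new(big.Int).Add(new(big.Int).Lsh(big.NewInt(1), 252), delta)

	var a, b, c [32]byte
	a[0], b[0], c[0] = 2, 3, 5 // little-endian scalars a=2, b=3, c=5

	// Reference value of (a*b + c) mod l; ScMulAdd writes the little-endian
	// encoding of this number into its s argument.
	got := new(big.Int).Mul(leToInt(a[:]), leToInt(b[:]))
	got.Add(got, leToInt(c[:]))
	got.Mod(got, l)
	fmt.Println(got) // 11
}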
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/bits_compat.go rename to vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/bits_go1.13.go rename to vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/mac_noasm.go rename to vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go similarity index 98% rename from vendor/golang.org/x/crypto/poly1305/poly1305.go rename to vendor/golang.org/x/crypto/internal/poly1305/poly1305.go index 9d7a6af09..4aaea810a 100644 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go @@ -15,7 +15,7 @@ // used with a fixed key in order to generate one-time keys from an nonce. // However, in this package AES isn't used and the one-time key is specified // directly. -package poly1305 // import "golang.org/x/crypto/poly1305" +package poly1305 import "crypto/subtle" diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/sum_amd64.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/sum_amd64.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go similarity index 99% rename from vendor/golang.org/x/crypto/poly1305/sum_generic.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index c942a6590..e041da5ea 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -136,7 +136,7 @@ func shiftRightBy2(a uint128) uint128 { // updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of // 128 bits of message, it computes // -// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// h₊ = (h + m) * r mod 2¹³⁰ - 5 // // If the msg length is not a multiple of TagSize, it assumes the last // incomplete chunk is the final one. @@ -278,8 +278,7 @@ const ( // finalize completes the modular reduction of h and computes // -// out = h + s mod 2¹²⁸ -// +// out = h + s mod 2¹²⁸ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { h0, h1, h2 := h[0], h[1], h[2] diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s similarity index 100% rename from vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go similarity index 99% rename from vendor/golang.org/x/crypto/poly1305/sum_s390x.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index 62cc9f847..ec9596688 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -14,6 +14,7 @@ import ( // updateVX is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is // available. +// //go:noescape func updateVX(state *macState, msg []byte) diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s similarity index 99% rename from vendor/golang.org/x/crypto/poly1305/sum_s390x.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s index 69c64f842..aa9e0494c 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -18,7 +18,7 @@ // value. These limbs are, for the most part, zero extended and // placed into 64-bit vector register elements. Each vector // register is 128-bits wide and so holds 2 of these elements. -// Using 26-bit limbs allows us plenty of headroom to accomodate +// Using 26-bit limbs allows us plenty of headroom to accommodate // accumulations before and after multiplication without // overflowing either 32-bits (before multiplication) or 64-bits // (after multiplication). diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 9d3fffa8f..4269ed113 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -345,6 +345,8 @@ func (req *Request) Marshal() ([]byte, error) { // Response represents an OCSP response containing a single SingleResponse. See // RFC 6960. 
type Response struct { + Raw []byte + // Status is one of {Good, Revoked, Unknown} Status int SerialNumber *big.Int @@ -518,6 +520,7 @@ func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Respon } ret := &Response{ + Raw: bytes, TBSResponseData: basicResp.TBSResponseData.Raw, Signature: basicResp.Signature.RightAlign(), SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), @@ -668,7 +671,7 @@ func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte // The responder cert is used to populate the responder's name field, and the // certificate itself is provided alongside the OCSP response signature. // -// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. +// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields. // // The template is used to populate the SerialNumber, Status, RevokedAt, // RevocationReason, ThisUpdate, and NextUpdate fields. diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index 36a680436..be342ad47 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -4,6 +4,12 @@ // Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is // very similar to PEM except that it has an additional CRC checksum. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. package armor // import "golang.org/x/crypto/openpgp/armor" import ( @@ -17,12 +23,14 @@ import ( // A Block represents an OpenPGP armored structure. // // The encoded form is: -// -----BEGIN Type----- -// Headers // -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// // where Headers is a possibly empty sequence of Key: Value lines. // // Since the armored data can be very large, this package presents a streaming diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go index 6f07582c3..5b6e16c19 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) { // trailer. // // It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out +// +// encoding -> base64 encoder -> lineBreaker -> out type encoding struct { out io.Writer breaker *lineBreaker diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go index 72a6a7394..743b35a12 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -10,6 +10,12 @@ // This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it // unsuitable for other protocols. RSA should be used in preference in any // case. +// +// Deprecated: this package was only provided to support ElGamal encryption in +// OpenPGP. 
The golang.org/x/crypto/openpgp package is now deprecated (see +// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has +// compatibility and security issues (see https://eprint.iacr.org/2021/923). +// Moreover, this package doesn't protect against side-channel attacks. package elgamal // import "golang.org/x/crypto/openpgp/elgamal" import ( @@ -71,8 +77,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err // returns the plaintext of the message. An error can result only if the // ciphertext is invalid. Users should keep in mind that this is a padding // oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// be used to break the cryptosystem. See “Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel // Bleichenbacher, Advances in Cryptology (Crypto '98), func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { s := new(big.Int).Exp(c1, priv.X, priv.P) diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go index eb0550b2d..1d7a0ea05 100644 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -3,6 +3,12 @@ // license that can be found in the LICENSE file. // Package errors contains common error types for the OpenPGP packages. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. package errors // import "golang.org/x/crypto/openpgp/errors" import ( diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 9728d61d7..0a19794a8 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -4,6 +4,12 @@ // Package packet implements parsing and serialization of OpenPGP packets, as // specified in RFC 4880. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. package packet // import "golang.org/x/crypto/openpgp/packet" import ( diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go index 6ec664f44..48a893146 100644 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -3,6 +3,12 @@ // license that can be found in the LICENSE file. // Package openpgp implements high level operations on OpenPGP messages. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. 
package openpgp // import "golang.org/x/crypto/openpgp" import ( diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go index 4b9a44ca2..9de04958e 100644 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -4,6 +4,12 @@ // Package s2k implements the various OpenPGP string-to-key transforms as // specified in RFC 4800 section 3.7.1. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. package s2k // import "golang.org/x/crypto/openpgp/s2k" import ( diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 593f65300..904b57e01 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -32,7 +32,7 @@ import ( // can get a derived key for e.g. AES-256 (which needs a 32-byte key) by // doing: // -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) // // Remember to get a good random salt. At least 8 bytes is recommended by the // RFC. diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 000000000..c971a99fa --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. 
+func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning 
+// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 916c840b6..4600c2077 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -14,8 +14,10 @@ import ( "time" ) -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't +// appear in the Signature.Format field. const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -25,6 +27,21 @@ const ( CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. + CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" +) + +const ( + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 ) // Certificate types distinguish between host and user @@ -423,6 +440,16 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. 
+ if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err + } + c.Signature = sig + return nil + } + sig, err := authority.Sign(rand, c.bytesForSigning()) if err != nil { return err @@ -431,26 +458,42 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { return nil } -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in agent/client.go. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo } -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. +func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true } } - panic("unknown cert algorithm") + return "", false } func (cert *Certificate) bytesForSigning() []byte { @@ -494,13 +537,13 @@ func (c *Certificate) Marshal() []byte { return result } -// Type returns the key name. It is part of the PublicKey interface. +// Type returns the certificate algorithm name. It is part of the PublicKey interface. 
func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] + certName, ok := certificateAlgo(c.Key.Type()) if !ok { - panic("unknown cert key type " + c.Key.Type()) + panic("unknown certificate type for key type " + c.Key.Type()) } - return algo + return certName } // Verify verifies a signature against the certificate's public diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index 8bd6b3daf..770e8a663 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -18,7 +18,7 @@ import ( "io/ioutil" "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/poly1305" + "golang.org/x/crypto/internal/poly1305" ) const ( @@ -394,6 +394,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) } c.incIV() + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies @@ -636,7 +640,7 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com // AEAD, which is described here: // -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 // // the methods here also implement padding, which RFC4253 Section 6 // also requires of stream ciphers. @@ -710,6 +714,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([ plain := c.buf[4:contentEnd] s.XORKeyStream(plain, plain) + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index 99f68bd32..bdc356cbd 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -113,14 +113,18 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e return c.clientAuthenticate(config) } -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { +// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. +func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { sig, rest, ok := parseSignatureBody(result.Signature) if len(rest) > 0 || !ok { return errors.New("ssh: signature parse error") } + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) + } + return hostKey.Verify(result.H, sig) } @@ -224,11 +228,11 @@ type ClientConfig struct { // be used for the connection. If empty, a reasonable default is used. ClientVersion string - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. 
+ // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. HostKeyAlgorithms []string // Timeout is the maximum amount of time for the TCP connection to establish. diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index c611aeb68..409b5ea1d 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "strings" ) type authResult int @@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { if err != nil { return err } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. + extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } var serviceAccept serviceAcceptMsg if err := Unmarshal(packet, &serviceAccept); err != nil { return err @@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { sessionID := c.transport.getSessionID() for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { return err } @@ -93,7 +121,7 @@ type AuthMethod interface { // If authentication is not successful, a []string of alternative // method names is returned. If the slice is nil, it will be ignored // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) // method returns the RFC 4252 method name. method() string @@ -102,7 +130,7 @@ type AuthMethod interface { // "none" authentication, RFC 4252 section 5.2. type noneAuth int -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { if err := c.writePacket(Marshal(&userAuthRequestMsg{ User: user, Service: serviceSSH, @@ -122,7 +150,7 @@ func (n *noneAuth) method() string { // a function call, e.g. by prompting the user. 
type passwordCallback func() (password string, err error) -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type passwordAuthMsg struct { User string `sshtype:"50"` Service string @@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { + keyFormat := signer.PublicKey().Type() + + // Like in sendKexInit, if the public key implements AlgorithmSigner we + // assume it supports all algorithms, otherwise only the key format one. + as, ok := signer.(AlgorithmSigner) + if !ok { + return algorithmSignerWrapper{signer}, keyFormat + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension, fall back to the key + // format algorithm. + return as, keyFormat + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + keyAlgos := algorithmsForKeyFormat(keyFormat) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, try the key anyway with the key format + // algorithm, to support servers that fail to list all supported + // algorithms. + return as, keyFormat + } + return as, algo +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { // Authentication is performed by sending an enquiry to test if a key is // acceptable to the remote. If the key is acceptable, the client will // attempt to authenticate with the valid key. 
If not the client will repeat @@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand } var methods []string for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) + pub := signer.PublicKey() + as, algo := pickSignatureAlgorithm(signer, extensions) + + ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err } @@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand continue } - pub := signer.PublicKey() pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + data := buildDataSignedForAuth(session, userAuthRequestMsg{ User: user, Service: serviceSSH, Method: cb.method(), - }, []byte(pub.Type()), pubKey)) + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return authFailure, nil, err } @@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand Service: serviceSSH, Method: cb.method(), HasSig: true, - Algoname: pub.Type(), + Algoname: algo, PubKey: pubKey, Sig: sig, } @@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool { } // validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { +func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { pubKey := key.Marshal() msg := publickeyAuthMsg{ User: user, Service: serviceSSH, Method: "publickey", HasSig: false, - Algoname: key.Type(), + Algoname: algo, PubKey: pubKey, } if err := c.writePacket(Marshal(&msg)); err != nil { return false, err } - return confirmKeyAck(key, c) + return confirmKeyAck(key, algo, c) } -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { pubKey := key.Marshal() - algoname := key.Type() for { packet, err := c.readPacket() @@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil case msgUserAuthFailure: return false, nil default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) } } } @@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet // along with a list of remaining authentication methods to try next and // an error if an unexpected response was received. func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) { if err := handleBannerResponse(c, packet); err != nil { return authFailure, nil, err } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true case msgUserAuthFailure: var msg userAuthFailureMsg if err := Unmarshal(packet, &msg); err != nil { @@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error { // disabling echoing (e.g. 
for passwords), and return all the answers. // Challenge may be called multiple times in a single session. After // successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be +// questions, for which the name and instruction messages should be // printed. RFC 4256 section 3.3 details how the UI should behave for // both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) // KeyboardInteractive returns an AuthMethod using a prompt/response // sequence controlled by the server. @@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string { return "keyboard-interactive" } -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type initiateMsg struct { User string `sshtype:"50"` Service string @@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue case msgUserAuthInfoRequest: // OK case msgUserAuthFailure: @@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") } - answers, err := cb(msg.User, msg.Instruction, prompts, echos) + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) if err != nil { return authFailure, nil, err } @@ -497,9 +581,9 @@ type retryableAuthMethod struct { maxTries int } -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) + ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) if ok != authFailure || err != nil { // either success, partial success or error terminate return ok, methods, err } @@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct { target string } -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { m := &userAuthRequestMsg{ User: user, Service: serviceSSH, diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 290382d05..2a47a61de 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ 
b/vendor/golang.org/x/crypto/ssh/common.go @@ -44,11 +44,11 @@ var preferredCiphers = []string{ // supportedKexAlgos specifies the supported key-exchange algorithms in // preference order. var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -61,18 +61,20 @@ var serverForbiddenKexAlgos = map[string]struct{}{ // preferredKexAlgos specifies the default preference for key-exchange algorithms // in preference order. var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, } // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ + CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA512, KeyAlgoRSASHA256, KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, @@ -87,19 +89,33 @@ var supportedMACs = []string{ var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. 
+func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } } // unexpectedMessageError results when the SSH message that we received didn't @@ -146,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 { return 1 << 30 } +var aeadCiphers = map[string]bool{ + gcmCipherID: true, + chacha20Poly1305ID: true, +} + type algorithms struct { kex string hostKey string @@ -181,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs return } - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return + if !aeadCiphers[ctos.Cipher] { + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } } - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return + if !aeadCiphers[stoc.Cipher] { + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } } ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) @@ -272,8 +297,9 @@ func (c *Config) SetDefaults() { } // buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { data := struct { Session []byte Type byte @@ -281,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK Service string Method string Sign bool - Algo []byte + Algo string PubKey []byte }{ sessionID, diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index 67b7322c0..f6bff60dc 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -12,8 +12,9 @@ the multiplexed nature of SSH is exposed to users that wish to support others. References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. 
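// Illustrative sketch, not part of the vendored package: a standalone
// demonstration of the RFC 8332 key-format-to-signature-algorithm mapping that
// the common.go hunk above adds via algorithmsForKeyFormat. The constant values
// mirror the SSH wire names; the helper below reimplements the same switch for
// demonstration only and makes no claim about the package's internals beyond
// what the diff shows.
package main

import "fmt"

const (
	keyAlgoRSA       = "ssh-rsa"
	keyAlgoRSASHA256 = "rsa-sha2-256"
	keyAlgoRSASHA512 = "rsa-sha2-512"
)

// algorithmsForKeyFormat: an ssh-rsa key may sign with the SHA-2 algorithms as
// well as the legacy SHA-1 one, in preference order; every other key format
// supports exactly one signature algorithm, its own name.
func algorithmsForKeyFormat(keyFormat string) []string {
	switch keyFormat {
	case keyAlgoRSA:
		return []string{keyAlgoRSASHA256, keyAlgoRSASHA512, keyAlgoRSA}
	default:
		return []string{keyFormat}
	}
}

func main() {
	fmt.Println(algorithmsForKeyFormat(keyAlgoRSA))    // [rsa-sha2-256 rsa-sha2-512 ssh-rsa]
	fmt.Println(algorithmsForKeyFormat("ssh-ed25519")) // [ssh-ed25519]
}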
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 2b10b05a4..653dc4d2c 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -455,14 +455,38 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) - if len(t.hostKeys) > 0 { + isServer := len(t.hostKeys) > 0 + if isServer { for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) + // If k is an AlgorithmSigner, presume it supports all signature algorithms + // associated with the key format. (Ideally AlgorithmSigner would have a + // method to advertise supported algorithms, but it doesn't. This means that + // adding support for a new algorithm is a breaking change, as we will + // immediately negotiate it even if existing implementations don't support + // it. If that ever happens, we'll have to figure something out.) + // If k is not an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign can't pick + // a different default.) + keyFormat := k.PublicKey().Type() + if _, ok := k.(AlgorithmSigner); ok { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) + } else { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + } } + packet := Marshal(msg) // writePacket destroys the contents, so save a copy. @@ -582,9 +606,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { var result *kexResult if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) + result, err = t.server(kex, &magics) } else { - result, err = t.client(kex, t.algorithms, &magics) + result, err = t.client(kex, &magics) } if err != nil { @@ -611,19 +635,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return nil } -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicing or producing an incorrect signature. 
+type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} + } + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k + } } } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") + } - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) return r, err } -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { result, err := kex.Client(t.conn, t.config.Rand, magics) if err != nil { return nil, err @@ -634,7 +691,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics * return nil, err } - if err := verifyHostKeySignature(hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 766e92939..927a90cd4 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -20,12 +20,14 @@ import ( ) const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" // For the following kex only the client half contains a production // ready implementation. The server half only consists of a minimal @@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) { // kexAlgorithm abstracts different key exchange algorithms. type kexAlgorithm interface { // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) // Client runs the client-side key agreement. Caller is // responsible for verifying the host key signature. 
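Editor's illustration (assuming the updated x/crypto/ssh API; the printed result in the final comment is an expectation, not verified here): pickHostKey and algorithmSignerWrapper above rely on host keys implementing AlgorithmSigner, which lets a key sign with a non-default RSA algorithm such as rsa-sha2-256.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromSigner(key)
	if err != nil {
		panic(err)
	}
	// RSA signers from this package also implement AlgorithmSigner, so a
	// server host key can be used with rsa-sha2-256/512, not only ssh-rsa.
	as, ok := signer.(ssh.AlgorithmSigner)
	if !ok {
		panic("signer does not support algorithm selection")
	}
	sig, err := as.SignWithAlgorithm(rand.Reader, []byte("example data"), ssh.KeyAlgoRSASHA256)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.PublicKey().Type(), "->", sig.Format) // expect: ssh-rsa -> rsa-sha2-256
}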
@@ -86,6 +89,7 @@ type kexAlgorithm interface { // dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. type dhGroup struct { g, p, pMinus1 *big.Int + hashFunc crypto.Hash } func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { @@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, } func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - var x *big.Int for { var err error @@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha return nil, err } - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, kexDHReply.HostKey) writeInt(h, X) @@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: kexDHReply.HostKey, Signature: kexDHReply.Signature, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, nil } -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha hostKeyBytes := priv.PublicKey().Marshal() - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, hostKeyBytes) writeInt(h, kexDHInit.X) @@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } @@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: hostKeyBytes, Signature: sig, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, err } @@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { return true } -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return nil, err @@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p }, nil } +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + var kexAlgoMap = map[string]kexAlgorithm{} func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, } - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. + // This are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526. p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + group14 := &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), } + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} } -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. 
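Editor's note (illustrative sketch, not patch code): attaching hashFunc to dhGroup is what lets diffie-hellman-group14-sha1 and the new diffie-hellman-group14-sha256 share one modulus while hashing the exchange differently. The kexHashes map below is a hypothetical stand-in for that per-algorithm hash selection.

package main

import (
	"crypto"
	_ "crypto/sha1"   // register SHA-1 so crypto.SHA1.New() works
	_ "crypto/sha256" // register SHA-256
	"fmt"
)

var kexHashes = map[string]crypto.Hash{
	"diffie-hellman-group14-sha1":   crypto.SHA1,
	"diffie-hellman-group14-sha256": crypto.SHA256,
}

func main() {
	for name, h := range kexHashes {
		sum := h.New()
		sum.Write([]byte("exchange hash input"))
		fmt.Printf("%s: %d-byte digest\n", name, len(sum.Sum(nil)))
	}
}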
type curve25519sha256 struct{} type curve25519KeyPair struct { @@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh }, nil } -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh H := h.Sum(nil) - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh // diffie-hellman-group-exchange-sha256 key agreement protocols, // as described in RFC 4419 type dhGEXSHA struct { - g, p *big.Int hashFunc crypto.Hash } @@ -563,14 +586,7 @@ const ( dhGroupExchangeMaximumBits = 8192 ) -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - var kexDHGexGroup kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { return nil, err } // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) } - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: server provided gex g is not safe") } // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(msg.P, 1) x, err := rand.Int(randSource, pHalf) if err != nil { return nil, err } - X := new(big.Int).Exp(gex.g, x, gex.p) + X := new(big.Int).Exp(msg.G, x, msg.P) kexDHGexInit := kexDHGexInitMsg{ X: X, } @@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - return nil, err + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || 
kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: derived k is not safe") } @@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, msg.P) + writeInt(h, msg.G) writeInt(h, X) writeInt(h, kexDHGexReply.Y) K := make([]byte, intLength(kInt)) @@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { @@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - // Send GexGroup // This is the group called diffie-hellman-group14-sha1 in RFC // 4253 and Oakley Group 14 in RFC 3526. 
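Editor's sketch (the helper name checkDHPeerValue is made up): the group-exchange code above replaces the old diffieHellman helper with inline bounds checks; the rule it enforces is simply 1 < Y < p-1 before computing the shared secret.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var bigOne = big.NewInt(1)

// checkDHPeerValue rejects a peer's DH value unless 1 < y < p-1.
func checkDHPeerValue(y, p *big.Int) error {
	pMinusOne := new(big.Int).Sub(p, bigOne)
	if y.Cmp(bigOne) <= 0 || y.Cmp(pMinusOne) >= 0 {
		return errors.New("ssh: DH parameter out of bounds")
	}
	return nil
}

func main() {
	p := big.NewInt(23) // toy modulus for illustration only
	fmt.Println(checkDHPeerValue(big.NewInt(1), p))  // rejected
	fmt.Println(checkDHPeerValue(big.NewInt(22), p)) // rejected (p-1)
	fmt.Println(checkDHPeerValue(big.NewInt(5), p))  // <nil>
}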
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) + g := big.NewInt(2) - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, + msg := &kexDHGexGroupMsg{ + P: p, + G: g, } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + if err := c.writePacket(Marshal(msg)); err != nil { return nil, err } @@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(p, 1) y, err := rand.Int(randSource, pHalf) if err != nil { return } + Y := new(big.Int).Exp(g, y, p) - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) hostKeyBytes := priv.PublicKey().Marshal() @@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, p) + writeInt(h, g) writeInt(h, kexDHGexInit.X) writeInt(h, Y) @@ -758,7 +750,7 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 31f26349a..1c7de1a6d 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -30,8 +30,9 @@ import ( "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) -// These constants represent the algorithm names for key types supported by this -// package. +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. const ( KeyAlgoRSA = "ssh-rsa" KeyAlgoDSA = "ssh-dss" @@ -41,16 +42,21 @@ const ( KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" KeyAlgoED25519 = "ssh-ed25519" KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" ) -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. 
See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. + SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. + SigAlgoRSASHA2512 = KeyAlgoRSASHA512 ) // parsePubKey parses a public key of the given algorithm. @@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err case KeyAlgoSKED25519: return parseSKEd25519(in) case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) + cert, err := parseCert(in, certKeyAlgoNames[algo]) if err != nil { return nil, nil, err } @@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } -// PublicKey is an abstraction of different types of public keys. +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". + // Type returns the key format name, e.g. "ssh-rsa". Type() string - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. Marshal() []byte - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. Verify(data []byte, sig *Signature) error } @@ -311,25 +320,32 @@ type CryptoPublicKey interface { } // A Signer can create signatures that verify against a public key. +// +// Some Signers provided by this package also implement AlgorithmSigner. type Signer interface { - // PublicKey returns an associated PublicKey instance. + // PublicKey returns the associated PublicKey. PublicKey() PublicKey - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). Sign(rand io.Reader, data []byte) (*Signature, error) } -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, so it should +// be prepared to be invoked with every algorithm supported by the public key +// format. type AlgorithmSigner interface { Signer - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. 
See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } @@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte { } func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: + supportedAlgos := algorithmsForKeyFormat(r.Type()) + if !contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } + hash := hashFuncs[sig.Format] h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := crypto.SHA1.New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey { } func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { @@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := crypto.SHA1.New() + h := hashFuncs[k.PublicKey().Type()].New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool { return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() } -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - // parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
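Editor's sketch (hashForFormat and digestForSignature are made-up names; the patch's actual lookup table, hashFuncs, is referenced in these hunks but defined elsewhere in the patch): the shape of the new RSA verification path — accept any signature format valid for the key format, then hash with that format's digest instead of hard-coding SHA-1.

package main

import (
	"crypto"
	_ "crypto/sha1"
	_ "crypto/sha256"
	_ "crypto/sha512"
	"fmt"
)

// hashForFormat mirrors the idea of the hashFuncs table: one digest per
// signature format name.
var hashForFormat = map[string]crypto.Hash{
	"ssh-rsa":      crypto.SHA1,
	"rsa-sha2-256": crypto.SHA256,
	"rsa-sha2-512": crypto.SHA512,
}

func digestForSignature(format string, data []byte) ([]byte, error) {
	h, ok := hashForFormat[format]
	if !ok {
		return nil, fmt.Errorf("ssh: signature type %s not supported for ssh-rsa keys", format)
	}
	hh := h.New()
	hh.Write(data)
	return hh.Sum(nil), nil
}

func main() {
	d, err := digestForSignature("rsa-sha2-256", []byte("data"))
	fmt.Println(len(d), err) // 32 <nil>
}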
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { var w struct { @@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := sha256.New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -961,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey { } func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") + return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } + if algorithm == "" { + algorithm = s.pubKey.Type() + } - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } + supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) + if !contains(supportedAlgos, algorithm) { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } + hashFunc := hashFuncs[algorithm] var digest []byte if hashFunc != 0 { h := hashFunc.New() diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index ac41a4168..19bc67c46 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -141,6 +141,14 @@ type serviceAcceptMsg struct { Service string `sshtype:"6"` } +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + // See RFC 4252, section 5. 
const msgUserAuthRequest = 50 @@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60 const msgUserAuthInfoResponse = 61 type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` } // See RFC 4254, section 5.1. @@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) { msg = new(serviceRequestMsg) case msgServiceAccept: msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) case msgKexInit: msg = new(kexInitMsg) case msgKexDHInit: @@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{ msgDisconnect: "disconnectMsg", msgServiceRequest: "serviceRequestMsg", msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", msgKexInit: "kexInitMsg", msgKexDHInit: "kexDHInitMsg", msgKexDHReply: "kexDHReplyMsg", diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index b6911e830..70045bdfd 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -120,7 +120,7 @@ type ServerConfig struct { } // AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server +// key exists with the same public key format, it is replaced. Each server // config must have at least one host key. func (s *ServerConfig) AddHostKey(key Signer) { for i, k := range s.hostKeys { @@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } // signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) +// and serializes the result in SSH wire format. algo is the negotiate +// algorithm and may be a certificate type. +func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return nil, err } @@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } @@ -553,6 +554,7 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } + // Ensure the public key algo and signature algo // are supported. 
Compare the private key // algorithm name that corresponds to algo with @@ -562,7 +564,12 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + if underlyingAlgo(algo) != sig.Format { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) if err := pubKey.Verify(signedData, sig); err != nil { return nil, err @@ -633,6 +640,30 @@ userAuthLoop: } authFailures++ + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this: + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message then a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. + continue + } var failureMsg userAuthFailureMsg if config.PasswordCallback != nil { @@ -670,7 +701,7 @@ type sshClientKeyboardInteractive struct { *connection } -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { if len(questions) != len(echos) { return nil, errors.New("ssh: echos and questions must have equal length") } @@ -682,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest } if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, Instruction: instruction, NumPrompts: uint32(len(questions)), Prompts: prompts, diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go index d3321f6b7..eca31a22d 100644 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -85,6 +85,7 @@ const ( IXANY = 39 IXOFF = 40 IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 ISIG = 50 ICANON = 51 XCASE = 52 diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index 49ddc2e7d..acf5a21bb 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -238,15 +238,19 @@ var ( // (to setup server->client keys) or clientKeys (for client->server keys).
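Editor's sketch: the new server-side check above rejects a signature whose format does not match the algorithm the client selected, even when both are individually acceptable. underlyingAlgo here is a stand-in for the unexported helper the patch calls; the certificate-to-algorithm pairs are the ones this update defines (e.g. CertAlgoRSASHA256v01).

package main

import "fmt"

// underlyingAlgo is a stand-in: it maps a (possibly certificate) public key
// algorithm name to the plain signature algorithm expected in sig.Format.
func underlyingAlgo(algo string) string {
	switch algo {
	case "ssh-rsa-cert-v01@openssh.com":
		return "ssh-rsa"
	case "rsa-sha2-256-cert-v01@openssh.com":
		return "rsa-sha2-256"
	case "rsa-sha2-512-cert-v01@openssh.com":
		return "rsa-sha2-512"
	}
	return algo
}

func checkSignatureFormat(selectedAlgo, sigFormat string) error {
	if underlyingAlgo(selectedAlgo) != sigFormat {
		return fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sigFormat, selectedAlgo)
	}
	return nil
}

func main() {
	fmt.Println(checkSignatureFormat("rsa-sha2-256", "rsa-sha2-256")) // <nil>
	fmt.Println(checkSignatureFormat("rsa-sha2-256", "ssh-rsa"))      // error
}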
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) generateKeyMaterial(iv, d.ivTag, kex) generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } return cipherModes[algs.Cipher].create(key, iv, macKey, algs) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index a3c021d3f..cf66309c4 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -21,9 +21,9 @@ // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 344bd1433..0a54bdbcc 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 5270db5db..7b6b68511 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) 
(Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index e7de24ee6..6e071e852 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -137,11 +137,13 @@ func trimOWS(x string) string { // contains token amongst its comma-separated tokens, ASCII // case-insensitively. func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') { + if tokenEqual(trimOWS(v[:comma]), token) { + return true + } + v = v[comma+1:] } - return tokenEqual(v, token) + return tokenEqual(trimOWS(v), token) } // lowerASCII returns the ASCII lowercase version of b. @@ -171,13 +173,15 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) +// +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = +// +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -187,12 +191,13 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -265,27 +270,28 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. 
While most of the values that can be encoded diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile index 53fc52579..851224595 100644 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -38,7 +38,7 @@ RUN make RUN make install WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN wget https://curl.se/download/curl-7.45.0.tar.gz RUN tar -zxvf curl-7.45.0.tar.gz WORKDIR /root/curl-7.45.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa37..000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ascii.go b/vendor/golang.org/x/net/http2/ascii.go new file mode 100644 index 000000000..17caa2058 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 3a67636fe..780968d6c 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -7,13 +7,21 @@ package http2 import ( + "context" "crypto/tls" + "errors" "net/http" "sync" ) // ClientConnPool manages a pool of HTTP/2 client connections. 
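Editor's sketch relating to the httpguts change a little earlier (headerValueContainsToken rewritten from recursion to a loop): the same scan written as a standalone function. Note strings.EqualFold is Unicode-aware, whereas the real helper compares ASCII only; that is a simplification here.

package main

import (
	"fmt"
	"strings"
)

// containsToken reports whether the comma-separated header value v contains
// token, trimming optional whitespace around each element.
func containsToken(v, token string) bool {
	for {
		comma := strings.IndexByte(v, ',')
		if comma == -1 {
			return strings.EqualFold(strings.Trim(v, " \t"), token)
		}
		if strings.EqualFold(strings.Trim(v[:comma], " \t"), token) {
			return true
		}
		v = v[comma+1:]
	}
}

func main() {
	fmt.Println(containsToken("keep-alive, Upgrade", "upgrade")) // true
	fmt.Println(containsToken("close", "upgrade"))               // false
}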
type ClientConnPool interface { + // GetClientConn returns a specific HTTP/2 connection (usually + // a TLS-TCP connection) to an HTTP/2 server. On success, the + // returned ClientConn accounts for the upcoming RoundTrip + // call, so the caller should not omit it. If the caller needs + // to, ClientConn.RoundTrip can be called with a bogus + // new(http.Request) to release the stream reservation. GetClientConn(req *http.Request, addr string) (*ClientConn, error) MarkDead(*ClientConn) } @@ -40,7 +48,7 @@ type clientConnPool struct { conns map[string][]*ClientConn // key is host:port dialing map[string]*dialCall // currently in-flight dials keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls + addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls } func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { @@ -52,88 +60,85 @@ const ( noDialOnMiss = false ) -// shouldTraceGetConn reports whether getClientConn should call any -// ClientTrace.GetConn hook associated with the http.Request. -// -// This complexity is needed to avoid double calls of the GetConn hook -// during the back-and-forth between net/http and x/net/http2 (when the -// net/http.Transport is upgraded to also speak http2), as well as support -// the case where x/net/http2 is being used directly. -func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { - // If our Transport wasn't made via ConfigureTransport, always - // trace the GetConn hook if provided, because that means the - // http2 package is being used directly and it's the one - // dialing, as opposed to net/http. - if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { - return true - } - // Otherwise, only use the GetConn hook if this connection has - // been used previously for other requests. For fresh - // connections, the net/http package does the dialing. - return !st.freshConn -} - func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set? if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. traceGetConn(req, addr) const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) + cc, err := p.t.dialClientConn(req.Context(), addr, singleUse) if err != nil { return nil, err } return cc, nil } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if st := cc.idleState(); st.canTakeNewRequest { - if p.shouldTraceGetConn(st) { - traceGetConn(req, addr) + for { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.ReserveNewRequest() { + // When a connection is presented to us by the net/http package, + // the GetConn hook has already been called. + // Don't call it a second time here. 
+ if !cc.getConnCalled { + traceGetConn(req, addr) + } + cc.getConnCalled = false + p.mu.Unlock() + return cc, nil } + } + if !dialOnMiss { p.mu.Unlock() - return cc, nil + return nil, ErrNoCachedConn } - } - if !dialOnMiss { + traceGetConn(req, addr) + call := p.getStartDialLocked(req.Context(), addr) p.mu.Unlock() - return nil, ErrNoCachedConn + <-call.done + if shouldRetryDial(call, req) { + continue + } + cc, err := call.res, call.err + if err != nil { + return nil, err + } + if cc.ReserveNewRequest() { + return cc, nil + } } - traceGetConn(req, addr) - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err } // dialCall is an in-flight Transport dial call to a host. type dialCall struct { - _ incomparable - p *clientConnPool + _ incomparable + p *clientConnPool + // the context associated with the request + // that created this dialCall + ctx context.Context done chan struct{} // closed when done res *ClientConn // valid after done is closed err error // valid after done is closed } // requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { +func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall { if call, ok := p.dialing[addr]; ok { // A dial is already in-flight. Don't start another. return call } - call := &dialCall{p: p, done: make(chan struct{})} + call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx} if p.dialing == nil { p.dialing = make(map[string]*dialCall) } p.dialing[addr] = call - go call.dial(addr) + go call.dial(call.ctx, addr) return call } // run in its own goroutine. -func (c *dialCall) dial(addr string) { +func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) + c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) c.p.mu.Lock() delete(c.p.dialing, addr) @@ -141,6 +146,8 @@ func (c *dialCall) dial(addr string) { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() + + close(c.done) } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't @@ -195,6 +202,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { if err != nil { c.err = err } else { + cc.getConnCalled = true // already called by the net/http package p.addConnLocked(key, cc) } delete(p.addConnCalls, key) @@ -276,3 +284,28 @@ type noDialClientConnPool struct{ *clientConnPool } func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { return p.getClientConn(req, addr, noDialOnMiss) } + +// shouldRetryDial reports whether the current request should +// retry dialing after the call finished unsuccessfully, for example +// if the dial was canceled because of a context cancellation or +// deadline expiry. +func shouldRetryDial(call *dialCall, req *http.Request) bool { + if call.err == nil { + // No error, no need to retry + return false + } + if call.ctx == req.Context() { + // If the call has the same context as the request, the dial + // should not be retried, since any cancellation will have come + // from this request. + return false + } + if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) { + // If the call error is not because of a context cancellation or a deadline expiry, + // the dial should not be retried. 
+ return false + } + // Only retry if the error is a context cancellation error or deadline expiry + // and the context associated with the call was canceled or expired. + return call.ctx.Err() != nil +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 71f2c4631..f2067dabc 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -53,6 +53,13 @@ func (e ErrCode) String() string { return fmt.Sprintf("unknown error code 0x%x", uint32(e)) } +func (e ErrCode) stringToken() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e)) +} + // ConnectionError is an error that results in the termination of the // entire connection. type ConnectionError ErrCode @@ -67,6 +74,11 @@ type StreamError struct { Cause error // optional additional detail } +// errFromPeer is a sentinel error value for StreamError.Cause to +// indicate that the StreamError was sent from the peer over the wire +// and wasn't locally generated in the Transport. +var errFromPeer = errors.New("received from peer") + func streamError(id uint32, code ErrCode) StreamError { return StreamError{StreamID: id, Code: code} } @@ -124,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) + return fmt.Sprintf("invalid header field value for %q", string(e)) } var ( diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 514c126c5..0178647ee 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -267,6 +267,11 @@ type Framer struct { lastFrame Frame errDetail error + // countError is a non-nil func that's called on a frame parse + // error with some unique error path token. It's initialized + // from Transport.CountError or Server.CountError. + countError func(errToken string) + // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. 
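Editor's sketch of the retry rule shouldRetryDial implements above (the name retryableDialError is hypothetical): retry only when the failure came from the dial's own context — one shared by an earlier request — being canceled or expired, never when the error belongs to the current request's context.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func retryableDialError(dialCtx, reqCtx context.Context, err error) bool {
	if err == nil || dialCtx == reqCtx {
		// No error, or the dial was driven by this request's own context.
		return false
	}
	if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
		return false
	}
	// Retry only if the dialing context itself was canceled or expired.
	return dialCtx.Err() != nil
}

func main() {
	reqCtx := context.Background()
	dialCtx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	<-dialCtx.Done()
	fmt.Println(retryableDialError(dialCtx, reqCtx, dialCtx.Err())) // true
}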
lastHeaderStream uint32 @@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ w: w, r: r, + countError: func(string) {}, logReads: logFrameReads, logWrites: logFrameWrites, debugReadLoggerf: log.Printf, @@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. + countError("frame_data_stream_0") return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } f := fc.getDataFrame() @@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro var err error payload, padSize, err = readByte(payload) if err != nil { + countError("frame_data_pad_byte_short") return nil, err } } @@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro // length of the frame payload, the recipient MUST // treat this as a connection error. // Filed: https://github.com/http2/http2-spec/issues/610 + countError("frame_data_pad_too_big") return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] @@ -695,7 +704,7 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. Receipt of a @@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type // FRAME_SIZE_ERROR. + countError("frame_settings_ack_with_length") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { @@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) // field is anything other than 0x0, the endpoint MUST // respond with a connection error (Section 5.4.1) of // type PROTOCOL_ERROR. + countError("frame_settings_has_stream") return nil, ConnectionError(ErrCodeProtocol) } if len(p)%6 != 0 { + countError("frame_settings_mod_6") // Expecting even number of 6 byte settings. return nil, ConnectionError(ErrCodeFrameSize) } f := &SettingsFrame{FrameHeader: fh, p: p} if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + countError("frame_settings_window_size_too_big") // Values above the maximum flow control window size of 2^31 - 1 MUST // be treated as a connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR. 
@@ -832,11 +845,13 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if len(payload) != 8 { + countError("frame_ping_length") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { + countError("frame_ping_has_stream") return nil, ConnectionError(ErrCodeProtocol) } f := &PingFrame{FrameHeader: fh} @@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.StreamID != 0 { + countError("frame_goaway_has_stream") return nil, ConnectionError(ErrCodeProtocol) } if len(p) < 8 { + countError("frame_goaway_short") return nil, ConnectionError(ErrCodeFrameSize) } return &GoAwayFrame{ @@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -923,8 +940,9 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if len(p) != 4 { + countError("frame_windowupdate_bad_len") return nil, ConnectionError(ErrCodeFrameSize) } inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit @@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, err // control window MUST be treated as a connection // error (Section 5.4.1). if fh.StreamID == 0 { + countError("frame_windowupdate_zero_inc_conn") return nil, ConnectionError(ErrCodeProtocol) } + countError("frame_windowupdate_zero_inc_stream") return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ @@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } @@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. 
+ countError("frame_headers_zero_stream") return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { if p, padLength, err = readByte(p); err != nil { + countError("frame_headers_pad_short") return } } @@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er var v uint32 p, v, err = readUint32(p) if err != nil { + countError("frame_headers_prio_short") return nil, err } hf.Priority.StreamDep = v & 0x7fffffff hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set p, hf.Priority.Weight, err = readByte(p) if err != nil { + countError("frame_headers_prio_weight_short") return nil, err } } - if len(p)-int(padLength) <= 0 { + if len(p)-int(padLength) < 0 { + countError("frame_headers_pad_too_big") return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] @@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if fh.StreamID == 0 { + countError("frame_priority_zero_stream") return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { + countError("frame_priority_bad_length") return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) @@ -1172,11 +1199,13 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if len(p) != 4 { + countError("frame_rststream_bad_len") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID == 0 { + countError("frame_rststream_zero_stream") return nil, ConnectionError(ErrCodeProtocol) } return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil @@ -1202,8 +1231,9 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.StreamID == 0 { + countError("frame_continuation_zero_stream") return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } return &ContinuationFrame{fh, p}, nil @@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } @@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err // with. If the stream identifier field specifies the value // 0x0, a recipient MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. + countError("frame_pushpromise_zero_stream") return nil, ConnectionError(ErrCodeProtocol) } // The PUSH_PROMISE frame includes optional padding. 
@@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err var padLength uint8 if fh.Flags.Has(FlagPushPromisePadded) { if p, padLength, err = readByte(p); err != nil { + countError("frame_pushpromise_pad_short") return } } p, pp.PromiseID, err = readUint32(p) if err != nil { + countError("frame_pushpromise_promiseid_short") return } pp.PromiseID = pp.PromiseID & (1<<31 - 1) if int(padLength) > len(p) { // like the DATA frame, error out if padding is longer than the body. + countError("frame_pushpromise_pad_too_big") return nil, ConnectionError(ErrCodeProtocol) } pp.headerFragBuf = p[:len(p)-int(padLength)] @@ -1498,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) + // Don't include the value in the error, because it may be sensitive. + invalid = headerFieldValueError(hf.Name) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go new file mode 100644 index 000000000..908af1ab9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.15 +// +build go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 000000000..aca4b2b31 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index c3ff3fa1c..9e12941da 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -6,7 +6,6 @@ package http2 import ( "net/http" - "strings" "sync" ) @@ -79,10 +78,10 @@ func buildCommonHeaderMaps() { } } -func lowerHeader(v string) string { +func lowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { - return s + return s, true } - return strings.ToLower(v) + return asciiToLower(v) } diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index a1ab2f056..20d083a71 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -140,50 +140,79 @@ func buildRootHuffmanNode() { panic("unexpected size") } lazyRootHuffmanNode = newInternalNode() - for i, code := range huffmanCodes { - addDecoderNode(byte(i), code, huffmanCodeLen[i]) - } -} + // allocate a leaf node for each of the 256 symbols + leaves := new([256]node) + + for sym, code := range huffmanCodes { + codeLen := huffmanCodeLen[sym] + + cur := lazyRootHuffmanNode + for codeLen > 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code< 8 { - codeLen -= 8 - i := uint8(code >> codeLen) - if cur.children[i] == nil { - cur.children[i] = newInternalNode() + leaves[sym].sym = byte(sym) + leaves[sym].codeLen = codeLen + for i := start; i < start+end; i++ { + cur.children[i] = &leaves[sym] } - cur = cur.children[i] - } - shift := 8 - codeLen - start, end := int(uint8(code<= 32 { + n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift + y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32 + dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } - dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } - - if rembits < 8 { - // special EOS symbol - code := uint32(0x3fffffff) - nbits := uint8(30) - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t + // Add padding bits if necessary + if over := n % 8; over > 0 { + const ( + eosCode = 0x3fffffff + eosNBits = 30 + eosPadByte = eosCode >> (eosNBits - 8) + ) + pad := 8 - over + x = (x << pad) | (eosPadByte >> over) + n += pad // 8 now divides into n exactly } - - return dst + // n in (0, 8, 16, 24, 32) + switch n / 8 { + case 0: + return dst + case 1: + return append(dst, byte(x)) + case 2: + y := uint16(x) + return append(dst, byte(y>>8), byte(y)) + case 3: + y := uint16(x >> 8) + return append(dst, byte(y>>8), byte(y), byte(x)) + } + // case 4: + y := uint32(x) + return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } // HuffmanEncodeLength returns the number of bytes required to encode @@ -195,35 +224,3 @@ func HuffmanEncodeLength(s string) uint64 { } return (n + 7) / 8 } - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. 
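The Huffman rewrite above is internal to the hpack package; the exported helpers keep their signatures. A small round-trip sketch using them:

package main

import (
    "fmt"

    "golang.org/x/net/http2/hpack"
)

func main() {
    const s = "www.example.com"

    // The encoded size can be computed without encoding.
    fmt.Println("encoded length:", hpack.HuffmanEncodeLength(s))

    enc := hpack.AppendHuffmanString(nil, s)
    dec, err := hpack.HuffmanDecodeToString(enc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d bytes -> %d bytes, round-trip %q\n", len(s), len(enc), dec)
}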
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 5571ccfd2..479ba4b2b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,6 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. -// package http2 // import "golang.org/x/net/http2" import ( @@ -176,10 +175,11 @@ func (s SettingID) String() string { // name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " +// +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go new file mode 100644 index 000000000..e6c04cf7a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.15 +// +build !go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext opens a TLS connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if cfg.InsecureSkipVerify { + return cn, nil + } + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + return cn, nil +} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 000000000..eab532c96 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 2a5399ec4..c15b8a771 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -30,6 +30,17 @@ type pipeBuffer interface { io.Reader } +// setBuffer initializes the pipe buffer. +// It has no effect if the pipe is already closed. +func (p *pipe) setBuffer(b pipeBuffer) { + p.mu.Lock() + defer p.mu.Unlock() + if p.err != nil || p.breakErr != nil { + return + } + p.b = b +} + func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index e125bbd2a..47524a61a 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -130,6 +130,12 @@ type Server struct { // If nil, a default scheduler is chosen. NewWriteScheduler func() WriteScheduler + // CountError, if non-nil, is called on HTTP/2 server errors. + // It's intended to increment a metric for monitoring, such + // as an expvar or Prometheus metric. + // The errType consists of only ASCII word characters. + CountError func(errType string) + // Internal state. This is a pointer (rather than embedded directly) // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. @@ -231,13 +237,12 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 { + // If they already provided a TLS 1.0–1.2 CipherSuite list, return an + // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or + // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { + for _, cs := range s.TLSConfig.CipherSuites { switch cs { case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // Alternative MTI cipher to not discourage ECDSA-only servers. @@ -245,14 +250,9 @@ func ConfigureServer(s *http.Server, conf *Server) error { tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: haveRequired = true } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)") } } @@ -265,16 +265,12 @@ func ConfigureServer(s *http.Server, conf *Server) error { s.TLSConfig.PreferServerCipherSuites = true - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { + if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } + if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1") + } if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} @@ -319,6 +315,20 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler + + // UpgradeRequest is an initial request received on a connection + // undergoing an h2c upgrade. The request body must have been + // completely read from the connection before calling ServeConn, + // and the 101 Switching Protocols response written. + UpgradeRequest *http.Request + + // Settings is the decoded contents of the HTTP2-Settings header + // in an h2c upgrade request. + Settings []byte + + // SawClientPreface is set if the HTTP/2 connection preface + // has already been read from the connection. + SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -387,6 +397,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, + sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -404,7 +415,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewRandomWriteScheduler() + sc.writeSched = NewPriorityWriteScheduler(nil) } // These start at the RFC-specified defaults. 
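Three server-side behavior changes land above: the cipher-suite check is skipped when MinVersion is TLS 1.3, "http/1.1" is advertised alongside "h2", and ServeConn now defaults to the priority write scheduler. A sketch exercising the first and last of these, assuming a TLS 1.3-only deployment; cert paths are placeholders:

package main

import (
    "crypto/tls"
    "log"
    "net/http"

    "golang.org/x/net/http2"
)

func main() {
    srv := &http.Server{
        Addr: ":8443",
        TLSConfig: &tls.Config{
            // A CipherSuites list without the HTTP/2-required AES-128-GCM suites
            // used to fail ConfigureServer; with MinVersion at TLS 1.3 the check
            // is skipped, since TLS 1.3 suites are not configurable anyway.
            MinVersion:   tls.VersionTLS13,
            CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305},
        },
    }
    h2 := &http2.Server{
        // The priority scheduler is now the default; this opts back into the
        // previous random scheduler for callers who prefer it.
        NewWriteScheduler: func() http2.WriteScheduler {
            return http2.NewRandomWriteScheduler()
        },
    }
    if err := http2.ConfigureServer(srv, h2); err != nil {
        log.Fatal(err)
    }
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}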
If there is a higher @@ -415,6 +426,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) fr := NewFramer(sc.bw, c) + if s.CountError != nil { + fr.countError = s.CountError + } fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) @@ -466,9 +480,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } + if opts.Settings != nil { + fr := &SettingsFrame{ + FrameHeader: FrameHeader{valid: true}, + p: opts.Settings, + } + if err := fr.ForeachSetting(sc.processSetting); err != nil { + sc.rejectConn(ErrCodeProtocol, "invalid settings") + return + } + opts.Settings = nil + } + if hook := testHookGetServerConn; hook != nil { hook(sc) } + + if opts.UpgradeRequest != nil { + sc.upgradeRequest(opts.UpgradeRequest) + opts.UpgradeRequest = nil + } + sc.serve() } @@ -513,6 +545,7 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool + sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? @@ -720,7 +753,15 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv + // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of + // entries in the canonHeader cache. This should be larger than the number + // of unique, uncommon header keys likely to be sent by the peer, while not + // so high as to permit unreasonable memory usage if the peer sends an unbounded + // number of unique header keys. + const maxCachedCanonicalHeaders = 32 + if len(sc.canonHeader) < maxCachedCanonicalHeaders { + sc.canonHeader[v] = cv + } return cv } @@ -826,7 +867,7 @@ func (sc *serverConn) serve() { }) sc.unackedSettings++ - // Each connection starts with intialWindowSize inflow tokens. + // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) @@ -866,6 +907,15 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + // Process any written frames before reading new frames from the client since a + // written frame could have triggered a new stream to be started. + if sc.writingFrameAsync { + select { + case wroteRes := <-sc.wroteFrameCh: + sc.wroteFrame(wroteRes) + default: + } + } if !sc.processFrameFromReader(res) { return } @@ -958,6 +1008,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { + if sc.sawClientPreface { + return nil + } errc := make(chan error, 1) go func() { // Read the client preface @@ -1400,7 +1453,7 @@ func (sc *serverConn) processFrame(f Frame) error { // First frame received must be SETTINGS. 
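canonicalHeader above now stops inserting into its cache after 32 entries, so a peer sending an unbounded number of unique header keys cannot grow per-connection memory indefinitely. A standalone sketch of the same bounded-memoization pattern around http.CanonicalHeaderKey; the type and constant names are illustrative, not the vendored ones:

package main

import "net/http"

const maxCachedKeys = 32

type headerCanonicalizer struct {
    cache map[string]string
}

func (c *headerCanonicalizer) canonical(v string) string {
    if s, ok := c.cache[v]; ok {
        return s
    }
    cv := http.CanonicalHeaderKey(v)
    if c.cache == nil {
        c.cache = make(map[string]string)
    }
    // Cache only up to the cap; later misses are recomputed rather than stored.
    if len(c.cache) < maxCachedKeys {
        c.cache[v] = cv
    }
    return cv
}

func main() {
    var c headerCanonicalizer
    _ = c.canonical("x-request-id") // computed and cached
    _ = c.canonical("x-request-id") // served from the cache
}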
if !sc.sawFirstSettings { if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) + return sc.countError("first_settings", ConnectionError(ErrCodeProtocol)) } sc.sawFirstSettings = true } @@ -1425,7 +1478,7 @@ func (sc *serverConn) processFrame(f Frame) error { case *PushPromiseFrame: // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) + return sc.countError("push_promise", ConnectionError(ErrCodeProtocol)) default: sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil @@ -1445,7 +1498,7 @@ func (sc *serverConn) processPing(f *PingFrame) error { // identifier field value other than 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) + return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol)) } if sc.inGoAway && sc.goAwayCode != ErrCodeNo { return nil @@ -1464,7 +1517,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) + return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol)) } if st == nil { // "WINDOW_UPDATE can be sent by a peer that has sent a @@ -1475,7 +1528,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { return nil } if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) + return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl)) } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { @@ -1496,7 +1549,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { // identifying an idle stream is received, the // recipient MUST treat this as a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) + return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol)) } if st != nil { st.cancelCtx() @@ -1548,7 +1601,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { // Why is the peer ACKing settings we never sent? // The spec doesn't mention this case, but // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) + return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol)) } return nil } @@ -1556,7 +1609,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { // This isn't actually in the spec, but hang up on // suspiciously large settings frames or those with // duplicate entries. - return ConnectionError(ErrCodeProtocol) + return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol)) } if err := f.ForeachSetting(sc.processSetting); err != nil { return err @@ -1623,7 +1676,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // control window to exceed the maximum size as a // connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) + return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl)) } } return nil @@ -1656,7 +1709,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." 
- return ConnectionError(ErrCodeProtocol) + return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol)) } // "If a DATA frame is received whose stream is not in "open" @@ -1673,7 +1726,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // and return any flow control bytes since we're not going // to consume them. if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) + return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } // Deduct the flow control from inflow, since we're // going to immediately add it back in @@ -1686,7 +1739,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // Already have a stream error in flight. Don't send another. return nil } - return streamError(id, ErrCodeStreamClosed) + return sc.countError("closed", streamError(id, ErrCodeStreamClosed)) } if st.body == nil { panic("internal error: should have a body in this state") @@ -1698,12 +1751,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the // value of a content-length header field does not equal the sum of the // DATA frame payload lengths that form the body. - return streamError(id, ErrCodeProtocol) + return sc.countError("send_too_much", streamError(id, ErrCodeProtocol)) } if f.Length > 0 { // Check whether the client has flow control quota. if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) + return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl)) } st.inflow.take(int32(f.Length)) @@ -1711,7 +1764,7 @@ func (sc *serverConn) processData(f *DataFrame) error { wrote, err := st.body.Write(data) if err != nil { sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return streamError(id, ErrCodeStreamClosed) + return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) } if wrote != len(data) { panic("internal error: bad Writer") @@ -1797,7 +1850,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // stream identifier MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) + return sc.countError("headers_even", ConnectionError(ErrCodeProtocol)) } // A HEADERS frame can be used to create a new stream or // send a trailer for an open one. If we already have a stream @@ -1814,7 +1867,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // this state, it MUST respond with a stream error (Section 5.4.2) of // type STREAM_CLOSED. if st.state == stateHalfClosedRemote { - return streamError(id, ErrCodeStreamClosed) + return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed)) } return st.processTrailerHeaders(f) } @@ -1825,7 +1878,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) + return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol)) } sc.maxClientStreamID = id @@ -1842,14 +1895,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { if sc.curClientStreams+1 > sc.advMaxStreams { if sc.unackedSettings == 0 { // They should know better. 
- return streamError(id, ErrCodeProtocol) + return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol)) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. - return streamError(id, ErrCodeRefusedStream) + return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream)) } initialState := stateOpen @@ -1859,7 +1912,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { st := sc.newStream(id, 0, initialState) if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { + if err := sc.checkPriority(f.StreamID, f.Priority); err != nil { return err } sc.writeSched.AdjustStream(st.id, f.Priority) @@ -1899,19 +1952,39 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { return nil } +func (sc *serverConn) upgradeRequest(req *http.Request) { + sc.serveG.check() + id := uint32(1) + sc.maxClientStreamID = id + st := sc.newStream(id, 0, stateHalfClosedRemote) + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + rw := sc.newResponseWriter(st, req) + + // Disable any read deadline set by the net/http package + // prior to the upgrade. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) +} + func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) + return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol)) } st.gotTrailerHeader = true if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol)) } if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol)) } if st.trailer != nil { for _, hf := range f.RegularFields() { @@ -1920,7 +1993,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol)) } st.trailer[key] = append(st.trailer[key], hf.Value) } @@ -1929,13 +2002,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { return nil } -func checkPriority(streamID uint32, p PriorityParam) error { +func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error { if streamID == p.StreamDep { // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." // Section 5.3.3 says that a stream can depend on one of its dependencies, // so it's only self-dependencies that are forbidden. 
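upgradeRequest above is the server half of an h2c upgrade; the ServeConnOpts fields added earlier (UpgradeRequest, Settings, SawClientPreface) exist so a wrapper that has already read the upgrade request can hand it off. The golang.org/x/net/http2/h2c package is such a wrapper; a sketch of serving cleartext HTTP/2 through it, with the port as a placeholder:

package main

import (
    "fmt"
    "log"
    "net/http"

    "golang.org/x/net/http2"
    "golang.org/x/net/http2/h2c"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, "proto: %s\n", r.Proto) // "HTTP/2.0" for h2c clients
    })
    // h2c.NewHandler accepts both "Upgrade: h2c" and prior-knowledge HTTP/2
    // on a plaintext listener; ordinary HTTP/1.1 requests pass through to mux.
    log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(mux, &http2.Server{})))
}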
- return streamError(streamID, ErrCodeProtocol) + return sc.countError("priority", streamError(streamID, ErrCodeProtocol)) } return nil } @@ -1944,7 +2017,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error { if sc.inGoAway { return nil } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) @@ -2001,7 +2074,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res isConnect := rp.method == "CONNECT" if isConnect { if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: @@ -2014,13 +2087,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } bodyOpen := !f.StreamEnded() if rp.method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) } rp.header = make(http.Header) @@ -2103,7 +2176,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var err error url_, err = url.ParseRequestURI(rp.path) if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) + return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) } requestURI = rp.path } @@ -2129,6 +2202,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) + rw := sc.newResponseWriter(st, req) + return rw, req, nil +} + +func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2137,10 +2215,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil + return &responseWriter{rws: rws} } // Run on its own goroutine. 
@@ -2300,17 +2375,18 @@ type requestBody struct { _ incomparable stream *stream conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue + closeOnce sync.Once // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true + b.closeOnce.Do(func() { + if b.pipe != nil { + b.pipe.BreakWithError(errClosedBody) + } + }) return nil } @@ -2354,7 +2430,6 @@ type responseWriterState struct { // immutable within a request: stream *stream req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc @@ -2530,8 +2605,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2627,8 +2703,7 @@ func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). + // at http://httpwg.org/specs/rfc7231.html#status.codes). // But for now any three digits. // // We used to send "HTTP/1.1 000 0" on the wire in responses but there's @@ -2649,13 +2724,41 @@ func (w *responseWriter) WriteHeader(code int) { } func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) + if rws.wroteHeader { + return + } + + checkWriteHeaderCode(code) + + // Handle informational headers + if code >= 100 && code <= 199 { + // Per RFC 8297 we must not clear the current header map + h := rws.handlerHeader + + _, cl := h["Content-Length"] + _, te := h["Transfer-Encoding"] + if cl || te { + h = h.Clone() + h.Del("Content-Length") + h.Del("Transfer-Encoding") + } + + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: code, + h: h, + endStream: rws.handlerDone && !rws.hasTrailers(), + }) != nil { + rws.dirty = true } + + return + } + + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) } } @@ -2789,8 +2892,12 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { // but PUSH_PROMISE requests cannot have a body. // http://tools.ietf.org/html/rfc7540#section-8.2 // Also disallow Host, since the promised URL must be absolute. 
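writeHeader above now forwards 1xx informational responses (per RFC 8297) instead of latching the first WriteHeader call as the final status. A handler sketch that sends 103 Early Hints before the real response; cert paths are placeholders:

package main

import (
    "fmt"
    "log"
    "net/http"

    "golang.org/x/net/http2"
)

func handler(w http.ResponseWriter, r *http.Request) {
    // Informational response: sent immediately, and the header map is kept.
    w.Header().Add("Link", "</style.css>; rel=preload; as=style")
    w.WriteHeader(http.StatusEarlyHints) // 103

    // Final response.
    w.Header().Set("Content-Type", "text/html")
    w.WriteHeader(http.StatusOK)
    fmt.Fprintln(w, `<!doctype html><link rel="stylesheet" href="/style.css">`)
}

func main() {
    srv := &http.Server{Addr: ":8443", Handler: http.HandlerFunc(handler)}
    if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
        log.Fatal(err)
    }
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}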
- switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": + if asciiEqualFold(k, "content-length") || + asciiEqualFold(k, "content-encoding") || + asciiEqualFold(k, "trailer") || + asciiEqualFold(k, "te") || + asciiEqualFold(k, "expect") || + asciiEqualFold(k, "host") { return fmt.Errorf("promised request headers cannot include %q", k) } } @@ -2982,3 +3089,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool { } return false } + +func (sc *serverConn) countError(name string, err error) error { + if sc == nil || sc.srv == nil { + return err + } + f := sc.srv.CountError + if f == nil { + return err + } + var typ string + var code ErrCode + switch e := err.(type) { + case ConnectionError: + typ = "conn" + code = ErrCode(e) + case StreamError: + typ = "stream" + code = ErrCode(e.Code) + default: + return err + } + codeStr := errCodeName[code] + if codeStr == "" { + codeStr = strconv.Itoa(int(code)) + } + f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name)) + return err +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 7688d72c3..4ded4dfd5 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,7 +16,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" mathrand "math/rand" @@ -24,6 +23,7 @@ import ( "net/http" "net/http/httptrace" "net/textproto" + "os" "sort" "strconv" "strings" @@ -51,6 +51,15 @@ const ( transportDefaultStreamMinRefresh = 4 << 10 defaultUserAgent = "Go-http-client/2.0" + + // initialMaxConcurrentStreams is a connections maxConcurrentStreams until + // it's received servers initial SETTINGS frame, which corresponds with the + // spec's minimum recommended value. + initialMaxConcurrentStreams = 100 + + // defaultMaxConcurrentStreams is a connections default maxConcurrentStreams + // if the server doesn't include one in its initial SETTINGS frame. + defaultMaxConcurrentStreams = 1000 ) // Transport is an HTTP/2 Transport. @@ -121,6 +130,17 @@ type Transport struct { // Defaults to 15s. PingTimeout time.Duration + // WriteByteTimeout is the timeout after which the connection will be + // closed no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + WriteByteTimeout time.Duration + + // CountError, if non-nil, is called on HTTP/2 transport errors. + // It's intended to increment a metric for monitoring, such + // as an expvar or Prometheus metric. + // The errType consists of only ASCII word characters. + CountError func(errType string) + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -227,11 +247,12 @@ func (t *Transport) initConnPool() { // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server. 
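The Transport grows WriteByteTimeout and CountError, and a new connection now assumes only 100 concurrent streams until the server's SETTINGS frame arrives (1000 if the server's SETTINGS omits the limit). A client sketch using the new knobs; the timeout values are illustrative:

package main

import (
    "fmt"
    "net/http"
    "time"

    "golang.org/x/net/http2"
)

func main() {
    t := &http2.Transport{
        // Close the connection if a pending write makes no progress for 15s;
        // the deadline is extended whenever any bytes are written.
        WriteByteTimeout: 15 * time.Second,
        // Fail an outstanding ping after 10s (the default is 15s).
        PingTimeout: 10 * time.Second,
        // Surface transport-level error paths for monitoring.
        CountError: func(errType string) { fmt.Println("http2 error:", errType) },
    }
    client := &http.Client{Transport: t, Timeout: 30 * time.Second}

    resp, err := client.Get("https://example.com/")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Proto, resp.Status)
}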
type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic - singleUse bool // whether being used for a single http.Request + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic + singleUse bool // whether being used for a single http.Request + getConnCalled bool // used by clientConnPool // readLoop goroutine fields: readerDone chan struct{} // closed on error @@ -244,87 +265,94 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip nextStreamID uint32 pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer br *bufio.Reader - fr *Framer lastActive time.Time lastIdle time.Time // time last idle - // Settings from peer: (also guarded by mu) + // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 peerMaxHeaderListSize uint64 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. + // Write to reqHeaderMu to lock it, read from it to unlock. + // Lock reqmu BEFORE mu or wmu. + reqHeaderMu chan struct{} - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred + // wmu is held while writing. + // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. + // Only acquire both at the same time when changing peer settings. + wmu sync.Mutex + bw *bufio.Writer + fr *Framer + werr error // first write error that has occurred + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder } // clientStream is the state for a single HTTP/2 stream. One of these // is created for each Transport.RoundTrip call. type clientStream struct { - cc *ClientConn - req *http.Request + cc *ClientConn + + // Fields of Request that we may access even after the response body is closed. 
+ ctx context.Context + reqCancel <-chan struct{} + trace *httptrace.ClientTrace // or nil ID uint32 - resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu requestedGzip bool - on100 func() // optional code to run if get a 100 continue response + isHead bool + + abortOnce sync.Once + abort chan struct{} // closed to signal stream should end immediately + abortErr error // set if abort is closed + + peerClosed chan struct{} // closed when the peer sends an END_STREAM flag + donec chan struct{} // closed after the stream is in the closed state + on100 chan struct{} // buffered; written to if a 100 is received + + respHeaderRecv chan struct{} // closed when headers are received + res *http.Response // set if respHeaderRecv is closed flow flow // guarded by cc.mu inflow flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed + reqBody io.ReadCloser + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed bool // body has been closed; guarded by cc.mu - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + // owned by writeRequest: + sentEndStream bool // sent an END_STREAM flag to the peer + sentHeaders bool // owned by clientConnReadLoop: firstByte bool // got the first response byte pastHeaders bool // got first MetaHeadersFrame (actual headers) pastTrailers bool // got optional second MetaHeadersFrame (trailers) num1xx uint8 // number of 1xx responses seen + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer } -// awaitRequestCancel waits for the user to cancel a request or for the done -// channel to be signaled. A non-nil error is returned only if the request was -// canceled. -func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := req.Context() - if req.Cancel == nil && ctx.Done() == nil { - return nil - } - select { - case <-req.Cancel: - return errRequestCanceled - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - } -} - var got1xxFuncForTests func(int, textproto.MIMEHeader) error // get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, @@ -336,73 +364,65 @@ func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error return traceGot1xxResponseFunc(cs.trace) } -// awaitRequestCancel waits for the user to cancel a request, its context to -// expire, or for the request to be done (any way it might be removed from the -// cc.streams map: peer reset, successful completion, TCP connection breakage, -// etc). If the request is canceled, then cs will be canceled and closed. 
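stickyErrWriter above implements WriteByteTimeout by resetting the write deadline whenever bytes are written, so the timeout measures lack of progress rather than the total time of a large write. A standalone sketch of the same pattern on a plain net.Conn; the function name and target are illustrative:

package main

import (
    "errors"
    "net"
    "os"
    "time"
)

// writeWithProgressTimeout writes p to c, extending the write deadline each
// time the deadline expires while bytes are still being written. It gives up
// only after a full timeout passes with no progress at all.
func writeWithProgressTimeout(c net.Conn, p []byte, timeout time.Duration) (int, error) {
    n := 0
    for {
        if timeout != 0 {
            c.SetWriteDeadline(time.Now().Add(timeout))
        }
        nn, err := c.Write(p[n:])
        n += nn
        if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
            continue // deadline hit, but progress was made: extend and retry
        }
        if timeout != 0 {
            c.SetWriteDeadline(time.Time{}) // clear the deadline
        }
        return n, err
    }
}

func main() {
    c, err := net.Dial("tcp", "example.com:80")
    if err != nil {
        return
    }
    defer c.Close()
    writeWithProgressTimeout(c, []byte("HEAD / HTTP/1.0\r\n\r\n"), 5*time.Second)
}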
-func (cs *clientStream) awaitRequestCancel(req *http.Request) { - if err := awaitRequestCancel(req, cs.done); err != nil { - cs.cancelStream() - cs.bufPipe.CloseWithError(err) - } +func (cs *clientStream) abortStream(err error) { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) } -func (cs *clientStream) cancelStream() { - cc := cs.cc - cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cc.mu.Unlock() - - if !didReset { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - cc.forgetStreamID(cs.ID) +func (cs *clientStream) abortStreamLocked(err error) { + cs.abortOnce.Do(func() { + cs.abortErr = err + close(cs.abort) + }) + if cs.reqBody != nil && !cs.reqBodyClosed { + cs.reqBody.Close() + cs.reqBodyClosed = true } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. -func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil + // TODO(dneil): Clean up tests where cs.cc.cond is nil. + if cs.cc.cond != nil { + // Wake up writeRequestBody if it is waiting on flow control. + cs.cc.cond.Broadcast() } } -func (cs *clientStream) getStartedWrite() bool { +func (cs *clientStream) abortRequestBodyWrite() { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() - return cs.startedWrite -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") + if cs.reqBody != nil && !cs.reqBodyClosed { + cs.reqBody.Close() + cs.reqBodyClosed = true + cc.cond.Broadcast() } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() } type stickyErrWriter struct { - w io.Writer - err *error + conn net.Conn + timeout time.Duration + err *error } func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = sew.w.Write(p) - *sew.err = err - return + for { + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) + } + nn, err := sew.conn.Write(p[n:]) + n += nn + if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { + // Keep extending the deadline so long as we're making progress. + continue + } + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Time{}) + } + *sew.err = err + return n, err + } } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -475,20 +495,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) traceGotConn(req, cc, reused) - res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { - if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) select { case <-time.After(time.Second * time.Duration(backoff)): + t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): - return nil, req.Context().Err() + err = req.Context().Err() } } } @@ -519,7 +541,7 @@ var ( // response headers. It is always called with a non-nil error. 
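RoundTripOpt above retries retryable failures: the first retry is immediate, and subsequent ones wait 2^(retry-1) seconds plus up to 10% jitter (the conversion through time.Duration rounds the jitter down to whole seconds). A small sketch that prints the schedule the loop computes:

package main

import (
    "fmt"
    mathrand "math/rand"
    "time"
)

func main() {
    for retry := 1; retry <= 6; retry++ {
        // Same expressions as the retry loop in RoundTripOpt.
        backoff := float64(uint(1) << (uint(retry) - 1))
        backoff += backoff * (0.1 * mathrand.Float64())
        wait := time.Second * time.Duration(backoff)
        fmt.Printf("retry %d: wait %v\n", retry, wait)
    }
}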
// It returns either a request to retry (either the same request, or a // modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { if !canRetryError(err) { return nil, err } @@ -532,7 +554,6 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt // If the request body can be reset back to its original // state via the optional req.GetBody, do that. if req.GetBody != nil { - // TODO: consider a req.Body.Close here? or audit that all caller paths do? body, err := req.GetBody() if err != nil { return nil, err @@ -544,10 +565,8 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt // The Request.Body can't reset back to the beginning, but we // don't seem to have started to read from it yet, so reuse - // the request directly. The "afterBodyWrite" means the - // bodyWrite process has started, which becomes true before - // the first Read. - if !afterBodyWrite { + // the request directly. + if err == errClientConnUnusable { return req, nil } @@ -559,17 +578,21 @@ func canRetryError(err error) bool { return true } if se, ok := err.(StreamError); ok { + if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { + // See golang/go#47635, golang/go#42777 + return true + } return se.Code == ErrCodeRefusedStream } return false } -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { +func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -590,34 +613,24 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { +func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { if t.DialTLS != nil { return t.DialTLS } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) + if err != nil { return nil, err } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil } // disableKeepAlives reports whether connections should be closed as @@ -643,14 +656,15 @@ func (t *Transport) newClientConn(c net.Conn, 
singleUse bool) (*ClientConn, erro tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d @@ -665,9 +679,16 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.bw = bufio.NewWriter(stickyErrWriter{ + conn: c, + timeout: t.WriteByteTimeout, + err: &cc.werr, + }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.CountError != nil { + cc.fr.countError = t.CountError + } cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() @@ -712,14 +733,23 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() + cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - cc.t.connPool().MarkDead(cc) - return + } else { + cc.vlogf("http2: Transport health check success") } } +// SetDoNotReuse marks cc as not reusable for future HTTP requests. +func (cc *ClientConn) SetDoNotReuse() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.doNotReuse = true +} + func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -737,27 +767,94 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { last := f.LastStreamID for streamID, cs := range cc.streams { if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } + cs.abortStreamLocked(errClientConnGotGoAway) } } } // CanTakeNewRequest reports whether the connection can take a new request, // meaning it has not been closed or received or sent a GOAWAY. +// +// If the caller is going to immediately make a new request on this +// connection, use ReserveNewRequest instead. func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.canTakeNewRequestLocked() } +// ReserveNewRequest is like CanTakeNewRequest but also reserves a +// concurrent stream in cc. The reservation is decremented on the +// next call to RoundTrip. +func (cc *ClientConn) ReserveNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + if st := cc.idleStateLocked(); !st.canTakeNewRequest { + return false + } + cc.streamsReserved++ + return true +} + +// ClientConnState describes the state of a ClientConn. +type ClientConnState struct { + // Closed is whether the connection is closed. + Closed bool + + // Closing is whether the connection is in the process of + // closing. 
It may be closing due to shutdown, being a + // single-use connection, being marked as DoNotReuse, or + // having received a GOAWAY frame. + Closing bool + + // StreamsActive is how many streams are active. + StreamsActive int + + // StreamsReserved is how many streams have been reserved via + // ClientConn.ReserveNewRequest. + StreamsReserved int + + // StreamsPending is how many requests have been sent in excess + // of the peer's advertised MaxConcurrentStreams setting and + // are waiting for other streams to complete. + StreamsPending int + + // MaxConcurrentStreams is how many concurrent streams the + // peer advertised as acceptable. Zero means no SETTINGS + // frame has been received yet. + MaxConcurrentStreams uint32 + + // LastIdle, if non-zero, is when the connection last + // transitioned to idle state. + LastIdle time.Time +} + +// State returns a snapshot of cc's state. +func (cc *ClientConn) State() ClientConnState { + cc.wmu.Lock() + maxConcurrent := cc.maxConcurrentStreams + if !cc.seenSettings { + maxConcurrent = 0 + } + cc.wmu.Unlock() + + cc.mu.Lock() + defer cc.mu.Unlock() + return ClientConnState{ + Closed: cc.closed, + Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, + StreamsActive: len(cc.streams), + StreamsReserved: cc.streamsReserved, + StreamsPending: cc.pendingRequests, + LastIdle: cc.lastIdle, + MaxConcurrentStreams: maxConcurrent, + } +} + // clientConnIdleState describes the suitability of a client // connection to initiate a new RoundTrip request. type clientConnIdleState struct { canTakeNewRequest bool - freshConn bool // whether it's unused by any previous request } func (cc *ClientConn) idleState() clientConnIdleState { @@ -778,13 +875,13 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) + maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() - st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest return } @@ -813,9 +910,27 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } +func (cc *ClientConn) closeConn() error { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + return cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. +func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tlsUnderlyingConn(tc); nc != nil { + nc.Close() + } +} + func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() - if len(cc.streams) > 0 { + if len(cc.streams) > 0 || cc.streamsReserved > 0 { cc.mu.Unlock() return } @@ -827,18 +942,24 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.tconn.Close() + cc.closeConn() +} + +func (cc *ClientConn) isDoNotReuseAndIdle() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.doNotReuse && len(cc.streams) == 0 } var shutdownEnterWaitStateHook = func() {} -// Shutdown gracefully close the client connection, waiting for running streams to complete. 
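A usage sketch for the ReserveNewRequest and State additions above (not from the vendored code): a caller-side pool picks a connection that still has capacity before handing it a request. The pool slice and pickConn helper are hypothetical; ClientConn.State and ClientConn.ReserveNewRequest are the real APIs introduced here.

package h2pool

import "golang.org/x/net/http2"

// pickConn returns a cached connection that has reserved capacity for one more
// request, or nil if none is usable. The reservation taken by ReserveNewRequest
// is consumed by the next RoundTrip on the returned connection.
func pickConn(pool []*http2.ClientConn) *http2.ClientConn {
    for _, cc := range pool {
        if st := cc.State(); st.Closed || st.Closing {
            continue // closed, shutting down, single-use, DoNotReuse, or saw a GOAWAY
        }
        if cc.ReserveNewRequest() {
            return cc
        }
    }
    return nil
}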
+// Shutdown gracefully closes the client connection, waiting for running streams to complete. func (cc *ClientConn) Shutdown(ctx context.Context) error { if err := cc.sendGoAway(); err != nil { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) + done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -846,7 +967,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - done <- cc.tconn.Close() + close(done) break } if cancelled { @@ -857,8 +978,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case err := <-done: - return err + case <-done: + return cc.closeConn() case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -871,15 +992,18 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { func (cc *ClientConn) sendGoAway() error { cc.mu.Lock() - defer cc.mu.Unlock() - cc.wmu.Lock() - defer cc.wmu.Unlock() - if cc.closing { + closing := cc.closing + cc.closing = true + maxStreamID := cc.nextStreamID + cc.mu.Unlock() + if closing { // GOAWAY sent already return nil } + + cc.wmu.Lock() + defer cc.wmu.Unlock() // Send a graceful shutdown frame to server - maxStreamID := cc.nextStreamID if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { return err } @@ -887,7 +1011,6 @@ func (cc *ClientConn) sendGoAway() error { return err } // Prevent new requests - cc.closing = true return nil } @@ -895,18 +1018,13 @@ func (cc *ClientConn) sendGoAway() error { // err is sent to streams. func (cc *ClientConn) closeForError(err error) error { cc.mu.Lock() - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - for id, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - cs.bufPipe.CloseWithError(err) - delete(cc.streams, id) - } cc.closed = true - return cc.tconn.Close() + for _, cs := range cc.streams { + cs.abortStreamLocked(err) + } + cc.cond.Broadcast() + cc.mu.Unlock() + return cc.closeConn() } // Close closes the client connection immediately. @@ -920,47 +1038,10 @@ func (cc *ClientConn) Close() error { // closes the client connection immediately. In-flight requests are interrupted. func (cc *ClientConn) closeForLostPing() error { err := errors.New("http2: client connection lost") - return cc.closeForError(err) -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize + if f := cc.t.CountError; f != nil { + f("conn_close_lost_ping") } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. 
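The ping-based health check, per-write deadline, and error-count hook exercised in the hunks above are all configured through exported Transport fields. A configuration sketch (the timeout values are illustrative, not taken from this patch):

package h2client

import (
    "log"
    "net/http"
    "time"

    "golang.org/x/net/http2"
)

// newClient wires up the Transport options used by these hunks: ReadIdleTimeout
// and PingTimeout drive the health check, WriteByteTimeout is the per-write
// deadline applied by stickyErrWriter, and CountError receives tokens such as
// "conn_close_lost_ping".
func newClient() *http.Client {
    t := &http2.Transport{
        ReadIdleTimeout:  30 * time.Second,
        PingTimeout:      15 * time.Second,
        WriteByteTimeout: 10 * time.Second,
        CountError: func(errType string) {
            log.Printf("http2 transport error: %s", errType)
        },
    }
    return &http.Client{Transport: t}
}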
+ return cc.closeForError(err) } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not @@ -1005,7 +1086,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -1024,41 +1105,158 @@ func actualContentLength(req *http.Request) int64 { return -1 } +func (cc *ClientConn) decrStreamReservations() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.decrStreamReservationsLocked() +} + +func (cc *ClientConn) decrStreamReservationsLocked() { + if cc.streamsReserved > 0 { + cc.streamsReserved-- + } +} + func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - resp, _, err := cc.roundTrip(req) - return resp, err + ctx := req.Context() + cs := &clientStream{ + cc: cc, + ctx: ctx, + reqCancel: req.Cancel, + isHead: req.Method == "HEAD", + reqBody: req.Body, + reqBodyContentLength: actualContentLength(req), + trace: httptrace.ContextClientTrace(ctx), + peerClosed: make(chan struct{}), + abort: make(chan struct{}), + respHeaderRecv: make(chan struct{}), + donec: make(chan struct{}), + } + go cs.doRequest(req) + + waitDone := func() error { + select { + case <-cs.donec: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled + } + } + + handleResponseHeaders := func() (*http.Response, error) { + res := cs.res + if res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite() + } + res.Request = req + res.TLS = cc.tlsState + if res.Body == noBody && actualContentLength(req) == 0 { + // If there isn't a request or response body still being + // written, then wait for the stream to be closed before + // RoundTrip returns. + if err := waitDone(); err != nil { + return nil, err + } + } + return res, nil + } + + for { + select { + case <-cs.respHeaderRecv: + return handleResponseHeaders() + case <-cs.abort: + select { + case <-cs.respHeaderRecv: + // If both cs.respHeaderRecv and cs.abort are signaling, + // pick respHeaderRecv. The server probably wrote the + // response and immediately reset the stream. + // golang.org/issue/49645 + return handleResponseHeaders() + default: + waitDone() + return nil, cs.abortErr + } + case <-ctx.Done(): + err := ctx.Err() + cs.abortStream(err) + return nil, err + case <-cs.reqCancel: + cs.abortStream(errRequestCanceled) + return nil, errRequestCanceled + } + } } -func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { +// doRequest runs for the duration of the request lifetime. 
+// +// It sends the request and performs post-request cleanup (closing Request.Body, etc.). +func (cs *clientStream) doRequest(req *http.Request) { + err := cs.writeRequest(req) + cs.cleanupWriteRequest(err) +} + +// writeRequest sends a request. +// +// It returns nil after the request is written, the response read, +// and the request stream is half-closed by the peer. +// +// It returns non-nil if the request ends otherwise. +// If the returned error is StreamError, the error Code may be used in resetting the stream. +func (cs *clientStream) writeRequest(req *http.Request) (err error) { + cc := cs.cc + ctx := cs.ctx + if err := checkConnHeaders(req); err != nil { - return nil, false, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() + return err } - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, false, err + // Acquire the new-request lock by writing to reqHeaderMu. + // This lock guards the critical section covering allocating a new stream ID + // (requires mu) and creating the stream (requires wmu). + if cc.reqHeaderMu == nil { + panic("RoundTrip on uninitialized ClientConn") // for tests + } + select { + case cc.reqHeaderMu <- struct{}{}: + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() } - hasTrailers := trailers != "" cc.mu.Lock() - if err := cc.awaitOpenSlotForRequest(req); err != nil { + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + cc.decrStreamReservationsLocked() + if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil { cc.mu.Unlock() - return nil, false, err + <-cc.reqHeaderMu + return err } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 + cc.addStreamLocked(cs) // assigns stream ID + if isConnectionCloseRequest(req) { + cc.doNotReuse = true + } + cc.mu.Unlock() // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && - req.Method != "HEAD" { + !cs.isHead { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: https://zlib.net/zlib_faq.html#faq39 @@ -1071,195 +1269,224 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf // We don't request gzip if the request is for a range, since // auto-decoding a portion of a gzipped document will just fail // anyway. See https://golang.org/issue/8923 - requestedGzip = true + cs.requestedGzip = true } - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, false, err + continueTimeout := cc.t.expectContinueTimeout() + if continueTimeout != 0 { + if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { + continueTimeout = 0 + } else { + cs.on100 = make(chan struct{}, 1) + } } - cs := cc.newStream() - cs.req = req - cs.trace = httptrace.ContextClientTrace(req.Context()) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 + // Past this point (where we send request headers), it is possible for + // RoundTrip to return successfully. 
Since the RoundTrip contract permits + // the caller to "mutate or reuse" the Request after closing the Response's Body, + // we must take care when referencing the Request from here on. + err = cs.encodeAndWriteHeaders(req) + <-cc.reqHeaderMu + if err != nil { + return err + } - defer func() { - cc.wmu.Lock() - werr := cc.werr - cc.wmu.Unlock() - if werr != nil { - cc.Close() + hasBody := cs.reqBodyContentLength != 0 + if !hasBody { + cs.sentEndStream = true + } else { + if continueTimeout != 0 { + traceWait100Continue(cs.trace) + timer := time.NewTimer(continueTimeout) + select { + case <-timer.C: + err = nil + case <-cs.on100: + err = nil + case <-cs.abort: + err = cs.abortErr + case <-ctx.Done(): + err = ctx.Err() + case <-cs.reqCancel: + err = errRequestCanceled + } + timer.Stop() + if err != nil { + traceWroteRequest(cs.trace, err) + return err + } } - }() - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() + if err = cs.writeRequestBody(req); err != nil { + if err != errStopReqBodyWrite { + traceWroteRequest(cs.trace, err) + return err + } + } else { + cs.sentEndStream = true } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, false, werr } + traceWroteRequest(cs.trace, err) + var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C + var respHeaderRecv chan struct{} + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + respHeaderRecv = cs.respHeaderRecv + } + // Wait until the peer half-closes its end of the stream, + // or until the request is aborted (via context, error, or otherwise), + // whichever comes first. + for { + select { + case <-cs.peerClosed: + return nil + case <-respHeaderTimer: + return errTimeout + case <-respHeaderRecv: + respHeaderRecv = nil + respHeaderTimer = nil // keep waiting for END_STREAM + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled } } +} - readLoopResCh := cs.resc - bodyWritten := false - ctx := req.Context() +func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { + cc := cs.cc + ctx := cs.ctx - handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. 
- bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - if hasBody && !bodyWritten { - <-bodyWriter.resc - } - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, false, nil + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // If the request was canceled while waiting for cc.mu, just quit. + select { + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled + default: } - for { + // Encode headers. + // + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return err + } + hasTrailers := trailers != "" + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + if err != nil { + return err + } + + // Write the request. + endStream := !hasBody && !hasTrailers + cs.sentHeaders = true + err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + traceWroteHeaders(cs.trace) + return err +} + +// cleanupWriteRequest performs post-request tasks. +// +// If err (the result of writeRequest) is non-nil and the stream is not closed, +// cleanupWriteRequest will send a reset to the peer. +func (cs *clientStream) cleanupWriteRequest(err error) { + cc := cs.cc + + if cs.ID == 0 { + // We were canceled before creating the stream, so return our reservation. + cc.decrStreamReservations() + } + + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cs.reqBodyClosed = true + cc.mu.Unlock() + if !bodyClosed && cs.reqBody != nil { + cs.reqBody.Close() + } + + if err != nil && cs.sentEndStream { + // If the connection is closed immediately after the response is read, + // we may be aborted before finishing up here. If the stream was closed + // cleanly on both sides, there is no error. 
select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errTimeout - case <-ctx.Done(): - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), ctx.Err() - case <-req.Cancel: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.peerClosed: + err = nil + default: + } + } + if err != nil { + cs.abortStream(err) // possibly redundant, but harmless + if cs.sentHeaders { + if se, ok := err.(StreamError); ok { + if se.Cause != errFromPeer { + cc.writeStreamReset(cs.ID, se.Code, err) + } } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.getStartedWrite(), cs.resetErr - case err := <-bodyWriter.resc: - bodyWritten = true - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), err - } - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) } } + cs.bufPipe.CloseWithError(err) // no-op if already closed + } else { + if cs.sentHeaders && !cs.sentEndStream { + cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + } + cs.bufPipe.CloseWithError(errRequestCanceled) + } + if cs.ID != 0 { + cc.forgetStreamID(cs.ID) + } + + cc.wmu.Lock() + werr := cc.werr + cc.wmu.Unlock() + if werr != nil { + cc.Close() } + + close(cs.donec) } -// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. -func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { - var waitingForConn chan struct{} - var waitingForConnErr error // guarded by cc.mu +func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { - if waitingForConn != nil { - close(waitingForConn) - } return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { - if waitingForConn != nil { - close(waitingForConn) - } + if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { return nil } - // Unfortunately, we cannot wait on a condition variable and channel at - // the same time, so instead, we spin up a goroutine to check if the - // request is canceled while we wait for a slot to open in the connection. 
- if waitingForConn == nil { - waitingForConn = make(chan struct{}) - go func() { - if err := awaitRequestCancel(req, waitingForConn); err != nil { - cc.mu.Lock() - waitingForConnErr = err - cc.cond.Broadcast() - cc.mu.Unlock() - } - }() - } cc.pendingRequests++ cc.cond.Wait() cc.pendingRequests-- - if waitingForConnErr != nil { - return waitingForConnErr + select { + case <-cs.abort: + return cs.abortErr + default: } } } @@ -1286,10 +1513,6 @@ func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. cc.bw.Flush() return cc.werr } @@ -1305,32 +1528,59 @@ var ( errReqBodyTooLong = errors.New("http2: request body larger than specified content length") ) -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { +// frameScratchBufferLen returns the length of a buffer to use for +// outgoing request bodies to read/write to/from. +// +// It returns max(1, min(peer's advertised max frame size, +// Request.ContentLength+1, 512KB)). +func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { + const max = 512 << 10 + n := int64(maxFrameSize) + if n > max { + n = max + } + if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n { + // Add an extra byte past the declared content-length to + // give the caller's Request.Body io.Reader a chance to + // give us more bytes than they declared, so we can catch it + // early. + n = cl + 1 + } + if n < 1 { + return 1 + } + return int(n) // doesn't truncate; max is 512K +} + +var bufPool sync.Pool // of *[]byte + +func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { cc := cs.cc + body := cs.reqBody sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - req := cs.req hasTrailers := req.Trailer != nil - remainLen := actualContentLength(req) + remainLen := cs.reqBodyContentLength hasContentLen := remainLen != -1 + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + // Scratch buffer for reading into & writing from. + scratchLen := cs.frameScratchBufferLen(maxFrameSize) + var buf []byte + if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPool.Put(bp) + buf = *bp + } else { + buf = make([]byte, scratchLen) + defer bufPool.Put(&buf) + } + var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)-1]) + n, err := body.Read(buf[:len(buf)]) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1341,35 +1591,36 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // to send the END_STREAM bit early, double-check that we're actually // at EOF. Subsequent reads should return (0, EOF) at this point. // If either value is different, we return an error in one of two ways below. 
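The frameScratchBufferLen/bufPool change above replaces the old per-connection freeBuf scratch buffers (removed earlier in this file) with a sync.Pool plus a per-request size. A standalone re-statement of the sizing rule with worked values (illustration only, not the vendored function):

package h2buf

// scratchLen mirrors the rule in frameScratchBufferLen above:
// max(1, min(peer's max frame size, contentLength+1, 512 KiB)). The extra byte
// past a known content length lets the transport notice a Request.Body that
// yields more data than it declared.
func scratchLen(maxFrameSize int, contentLength int64) int {
    const maxBuf = 512 << 10
    n := int64(maxFrameSize)
    if n > maxBuf {
        n = maxBuf
    }
    if contentLength != -1 && contentLength+1 < n {
        n = contentLength + 1
    }
    if n < 1 {
        return 1
    }
    return int(n)
}

// scratchLen(16384, 10) == 11       (small body of known length)
// scratchLen(1<<20, -1) == 512<<10  (unknown length, capped at 512 KiB)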
+ var scratch [1]byte var n1 int - n1, err = body.Read(buf[n:]) + n1, err = body.Read(scratch[:]) remainLen -= int64(n1) } if remainLen < 0 { err = errReqBodyTooLong - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) return err } } - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) - return err + if err != nil { + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cc.mu.Unlock() + switch { + case bodyClosed: + return errStopReqBodyWrite + case err == io.EOF: + sawEOF = true + err = nil + default: + return err + } } remain := buf[:n] for len(remain) > 0 && err == nil { var allowed int32 allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: + if err != nil { return err } cc.wmu.Lock() @@ -1400,24 +1651,26 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( return nil } - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls, err = cc.encodeTrailers(req) - cc.mu.Unlock() - if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeInternal, err) - cc.forgetStreamID(cs.ID) - return err - } - } - + // Since the RoundTrip contract permits the caller to "mutate or reuse" + // a request after the Response's Body is closed, verify that this hasn't + // happened before accessing the trailers. cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) + trailer := req.Trailer + err = cs.abortErr cc.mu.Unlock() + if err != nil { + return err + } cc.wmu.Lock() defer cc.wmu.Unlock() + var trls []byte + if len(trailer) > 0 { + trls, err = cc.encodeTrailers(trailer) + if err != nil { + return err + } + } // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. @@ -1438,17 +1691,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // if the stream is dead. func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { cc := cs.cc + ctx := cs.ctx cc.mu.Lock() defer cc.mu.Unlock() for { if cc.closed { return 0, errClientConnClosed } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody + if cs.reqBodyClosed { + return 0, errStopReqBodyWrite } - if err := cs.checkResetOrDone(); err != nil { - return 0, err + select { + case <-cs.abort: + return 0, cs.abortErr + case <-ctx.Done(): + return 0, ctx.Err() + case <-cs.reqCancel: + return 0, errRequestCanceled + default: } if a := cs.flow.available(); a > 0 { take := a @@ -1466,9 +1726,14 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -// requires cc.mu be held. +var errNilRequestURL = errors.New("http2: Request.URI is nil") + +// requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() + if req.URL == nil { + return nil, errNilRequestURL + } host := req.Host if host == "" { @@ -1504,7 +1769,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + // Don't include the value in the error, because it may be sensitive. 
+ return nil, fmt.Errorf("invalid HTTP header value for header %q", k) } } } @@ -1531,19 +1797,21 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail var didUA bool for k, vv := range req.Header { - if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { // Host is :authority, already sent. // Content-Length is automatic, set below. continue - } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || - strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || - strings.EqualFold(k, "keep-alive") { + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { // Per 8.1.2.2 Connection-Specific Header // Fields, don't send connection-specific // fields. We have already checked if any // are error-worthy so just ignore the rest. continue - } else if strings.EqualFold(k, "user-agent") { + } else if asciiEqualFold(k, "user-agent") { // Match Go's http1 behavior: at most one // User-Agent. If set to nil or empty string, // then omit it. Otherwise if not mentioned, @@ -1556,7 +1824,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if vv[0] == "" { continue } - } else if strings.EqualFold(k, "cookie") { + } else if asciiEqualFold(k, "cookie") { // Per 8.1.2.5 To allow for better compression efficiency, the // Cookie header field MAY be split into separate header fields, // each with one or more cookie-pairs. @@ -1615,7 +1883,12 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - name = strings.ToLower(name) + name, ascii := asciiToLower(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } cc.writeHeader(name, value) if traceHeaders { traceWroteHeaderField(trace, name, value) @@ -1647,12 +1920,12 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { } } -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { +// requires cc.wmu be held. +func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() hlSize := uint64(0) - for k, vv := range req.Trailer { + for k, vv := range trailer { for _, v := range vv { hf := hpack.HeaderField{Name: k, Value: v} hlSize += uint64(hf.Size()) @@ -1662,10 +1935,15 @@ func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { return nil, errRequestHeaderListSize } - for k, vv := range req.Trailer { + for k, vv := range trailer { + lowKey, ascii := asciiToLower(k) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + continue + } // Transfer-Encoding, etc.. have already been filtered at the // start of RoundTrip - lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } @@ -1687,51 +1965,51 @@ type resAndError struct { } // requires cc.mu be held. 
-func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } +func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) cs.inflow.add(transportDefaultStreamFlow) cs.inflow.setConnFlow(&cc.inflow) + cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs - return cs + if cs.ID == 0 { + panic("assigned stream ID 0") + } } func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() - } - close(cs.done) - // Wake up checkResetOrDone via clientStream.awaitFlowControl and - // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() + slen := len(cc.streams) + delete(cc.streams, id) + if len(cc.streams) != slen-1 { + panic("forgetting unknown stream id") + } + cc.lastActive = time.Now() + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + cc.lastIdle = time.Now() + } + // Wake up writeRequestBody via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) + } + cc.closed = true + defer cc.closeConn() } - return cs + + cc.mu.Unlock() } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { - _ incomparable - cc *ClientConn - closeWhenIdle bool + _ incomparable + cc *ClientConn } // readLoop runs in its own goroutine and reads and dispatches frames. @@ -1769,8 +2047,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() defer close(cc.readerDone) if cc.idleTimer != nil { @@ -1791,23 +2069,49 @@ func (rl *clientConnReadLoop) cleanup() { } else if err == io.EOF { err = io.ErrUnexpectedEOF } + cc.closed = true for _, cs := range cc.streams { - cs.bufPipe.CloseWithError(err) // no-op if already closed select { - case cs.resc <- resAndError{err: err}: + case <-cs.peerClosed: + // The server closed the stream before closing the conn, + // so no need to interrupt it. default: + cs.abortStreamLocked(err) } - close(cs.done) } - cc.closed = true cc.cond.Broadcast() cc.mu.Unlock() } +// countReadFrameError calls Transport.CountError with a string +// representing err. 
+func (cc *ClientConn) countReadFrameError(err error) { + f := cc.t.CountError + if f == nil || err == nil { + return + } + if ce, ok := err.(ConnectionError); ok { + errCode := ErrCode(ce) + f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken())) + return + } + if errors.Is(err, io.EOF) { + f("read_frame_eof") + return + } + if errors.Is(err, io.ErrUnexpectedEOF) { + f("read_frame_unexpected_eof") + return + } + if errors.Is(err, ErrFrameTooLarge) { + f("read_frame_too_large") + return + } + f("read_frame_other") +} + func (rl *clientConnReadLoop) run() error { cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout var t *time.Timer @@ -1824,9 +2128,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, false); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - cs.cc.forgetStreamID(cs.ID) + if cs := rl.streamByID(se.StreamID); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -1834,6 +2136,7 @@ func (rl *clientConnReadLoop) run() error { } continue } else if err != nil { + cc.countReadFrameError(err) return err } if VerboseLogs { @@ -1846,22 +2149,16 @@ func (rl *clientConnReadLoop) run() error { } gotSettings = true } - maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { case *MetaHeadersFrame: err = rl.processHeaders(f) - maybeIdle = true - gotReply = true case *DataFrame: err = rl.processData(f) - maybeIdle = true case *GoAwayFrame: err = rl.processGoAway(f) - maybeIdle = true case *RSTStreamFrame: err = rl.processResetStream(f) - maybeIdle = true case *SettingsFrame: err = rl.processSettings(f) case *PushPromiseFrame: @@ -1879,38 +2176,24 @@ func (rl *clientConnReadLoop) run() error { } return err } - if rl.closeWhenIdle && gotReply && maybeIdle { - cc.closeIfIdle() - } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) + cs := rl.streamByID(f.StreamID) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this // was just something we canceled, ignore it. return nil } - if f.StreamEnded() { - // Issue 20521: If the stream has ended, streamByID() causes - // clientStream.done to be closed, which causes the request's bodyWriter - // to be closed with an errStreamClosed, which may be received by - // clientConn.RoundTrip before the result of processing these headers. - // Deferring stream closure allows the header processing to occur first. - // clientConn.RoundTrip may still receive the bodyWriter error first, but - // the fix for issue 16102 prioritises any response. - // - // Issue 22413: If there is no request body, we should close the - // stream before writing to cs.resc so that the stream is closed - // immediately once RoundTrip returns. 
- if cs.req.Body != nil { - defer cc.forgetStreamID(f.StreamID) - } else { - cc.forgetStreamID(f.StreamID) - } + if cs.readClosed { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: errors.New("protocol error: headers after END_STREAM"), + }) + return nil } if !cs.firstByte { if cs.trace != nil { @@ -1934,9 +2217,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { return err } // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cc.forgetStreamID(cs.ID) - cs.resc <- resAndError{err: err} + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: err, + }) return nil // return nil from process* funcs to keep conn alive } if res == nil { @@ -1944,7 +2229,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { return nil } cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} + cs.res = res + close(cs.respHeaderRecv) + if f.StreamEnded() { + rl.endStream(cs) + } return nil } @@ -2006,6 +2295,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } if statusCode >= 100 && statusCode <= 199 { + if f.StreamEnded() { + return nil, errors.New("1xx informational response with END_STREAM flag") + } cs.num1xx++ const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http if cs.num1xx > max1xxResponses { @@ -2018,42 +2310,49 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } if statusCode == 100 { traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire + select { + case cs.on100 <- struct{}{}: + default: } } cs.pastHeaders = false // do it all again return nil, nil } - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { - res.ContentLength = int64(cl) - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { + res.ContentLength = int64(cl) + } else { // TODO: care? unlike http/1, it won't mess up our framing, so it's // more safe smuggling-wise to ignore. } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } else if f.StreamEnded() && !cs.isHead { + res.ContentLength = 0 } - if streamEnded || isHead { + if cs.isHead { res.Body = noBody return res, nil } - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + if f.StreamEnded() { + if res.ContentLength > 0 { + res.Body = missingBody{} + } else { + res.Body = noBody + } + return res, nil + } + + cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength}) cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") { res.Header.Del("Content-Encoding") res.Header.Del("Content-Length") res.ContentLength = -1 @@ -2092,8 +2391,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr } // transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. +// Response.Body. It is an io.ReadCloser. type transportResponseBody struct { cs *clientStream } @@ -2111,7 +2409,7 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { n = int(cs.bytesRemain) if err == nil { err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + cs.abortStream(err) } cs.readErr = err return int(cs.bytesRemain), err @@ -2129,8 +2427,6 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { } cc.mu.Lock() - defer cc.mu.Unlock() - var connAdd, streamAdd int32 // Check the conn-level first, before the stream-level. if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { @@ -2147,6 +2443,8 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { cs.inflow.add(streamAdd) } } + cc.mu.Unlock() + if connAdd != 0 || streamAdd != 0 { cc.wmu.Lock() defer cc.wmu.Unlock() @@ -2167,34 +2465,45 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { + if unread > 0 { cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } // Return connection-level flow control. if unread > 0 { cc.inflow.add(int32(unread)) + } + cc.mu.Unlock() + + // TODO(dneil): Acquiring this mutex can block indefinitely. + // Move flow control return to a goroutine? + cc.wmu.Lock() + // Return connection-level flow control. + if unread > 0 { cc.fr.WriteWindowUpdate(0, uint32(unread)) } cc.bw.Flush() cc.wmu.Unlock() - cc.mu.Unlock() } cs.bufPipe.BreakWithError(errClosedResponseBody) - cc.forgetStreamID(cs.ID) + cs.abortStream(errClosedResponseBody) + + select { + case <-cs.donec: + case <-cs.ctx.Done(): + // See golang/go#49366: The net/http package can cancel the + // request context after the response body is fully read. + // Don't treat this as an error. 
+ return nil + case <-cs.reqCancel: + return errRequestCanceled + } return nil } func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) + cs := rl.streamByID(f.StreamID) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2223,6 +2532,14 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } return nil } + if cs.readClosed { + cc.logf("protocol error: received DATA after END_STREAM") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } if !cs.firstByte { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ @@ -2232,7 +2549,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } if f.Length > 0 { - if cs.req.Method == "HEAD" && len(data) > 0 { + if cs.isHead && len(data) > 0 { cc.logf("protocol error: received DATA on a HEAD request") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2254,30 +2571,39 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { if pad := int(f.Length) - len(data); pad > 0 { refund += pad } - // Return len(data) now if the stream is already closed, - // since data will never be read. - didReset := cs.didReset - if didReset { - refund += len(data) + + didReset := false + var err error + if len(data) > 0 { + if _, err = cs.bufPipe.Write(data); err != nil { + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset = true + refund += len(data) + } } + if refund > 0 { cc.inflow.add(int32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + } + } + cc.mu.Unlock() + + if refund > 0 { cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(refund)) if !didReset { - cs.inflow.add(int32(refund)) cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) } cc.bw.Flush() cc.wmu.Unlock() } - cc.mu.Unlock() - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } + if err != nil { + rl.endStreamError(cs, err) + return nil } } @@ -2290,24 +2616,32 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) + if !cs.readClosed { + cs.readClosed = true + // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a + // race condition: The caller can read io.EOF from Response.Body + // and close the body before we close cs.peerClosed, causing + // cleanupWriteRequest to send a RST_STREAM. 
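The reworked processData accounting above returns unusable bytes to flow control as soon as they are known to be unreadable. A compact re-statement of that refund rule with an example (illustration only, not the vendored logic):

package h2flow

// dataRefund mirrors the accounting in processData above: padding bytes, plus
// the frame's data bytes when the stream's buffer rejects the write (the stream
// was already reset), are credited back to the connection-level window, and to
// the stream-level window only while the stream is still live.
func dataRefund(frameLength, dataLen int, writeRejected bool) (conn, stream int) {
    refund := 0
    if pad := frameLength - dataLen; pad > 0 {
        refund += pad // padding never reaches the caller
    }
    if writeRejected {
        refund += dataLen // the body data will never be read
    }
    conn = refund
    if !writeRejected {
        stream = refund
    }
    return conn, stream
}

// Example: a 1000-byte DATA frame with 800 data bytes and 200 bytes of padding
// refunds 200 to both windows on a live stream; the same frame on a stream that
// was just reset refunds 1000 to the connection window only.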
+ rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) + close(cs.peerClosed) + } } func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - cs.bufPipe.closeWithErrorAndCode(err, code) + cs.readAborted = true + cs.abortStream(err) +} - select { - case cs.resc <- resAndError{err: err}: - default: +func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { + rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs := rl.cc.streams[id] + if cs != nil && !cs.readAborted { + return cs } + return nil } func (cs *clientStream) copyTrailers() { @@ -2326,12 +2660,33 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if f.ErrCode != 0 { // TODO: deal with GOAWAY more. particularly the error code cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + if fn := cc.t.CountError; fn != nil { + fn("recv_goaway_" + f.ErrCode.stringToken()) + } + } cc.setGoAway(f) return nil } func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + // Locking both mu and wmu here allows frame encoding to read settings with only wmu held. + // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless. + cc.wmu.Lock() + defer cc.wmu.Unlock() + + if err := rl.processSettingsNoWrite(f); err != nil { + return err + } + if !f.IsAck() { + cc.fr.WriteSettingsAck() + cc.bw.Flush() + } + return nil +} + +func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() @@ -2344,12 +2699,14 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { return ConnectionError(ErrCodeProtocol) } + var seenMaxConcurrentStreams bool err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val + seenMaxConcurrentStreams = true case SettingMaxHeaderListSize: cc.peerMaxHeaderListSize = uint64(s.Val) case SettingInitialWindowSize: @@ -2381,17 +2738,23 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { return err } - cc.wmu.Lock() - defer cc.wmu.Unlock() + if !cc.seenSettings { + if !seenMaxConcurrentStreams { + // This was the servers initial SETTINGS frame and it + // didn't contain a MAX_CONCURRENT_STREAMS field so + // increase the number of concurrent streams this + // connection can establish to our default. + cc.maxConcurrentStreams = defaultMaxConcurrentStreams + } + cc.seenSettings = true + } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr + return nil } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, false) + cs := rl.streamByID(f.StreamID) if f.StreamID != 0 && cs == nil { return nil } @@ -2411,24 +2774,22 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) + cs := rl.streamByID(f.StreamID) if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream + // TODO: return error if server tries to RST_STREAM an idle stream return nil } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. 
- default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + serr := streamError(cs.ID, f.ErrCode) + serr.Cause = errFromPeer + if f.ErrCode == ErrCodeProtocol { + rl.cc.SetDoNotReuse() } + if fn := cs.cc.t.CountError; fn != nil { + fn("recv_rststream_" + f.ErrCode.stringToken()) + } + cs.abortStream(serr) + + cs.bufPipe.CloseWithError(serr) return nil } @@ -2450,19 +2811,24 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() + errc := make(chan error, 1) + go func() { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(false, p); err != nil { + errc <- err + return + } + if err := cc.bw.Flush(); err != nil { + errc <- err + return + } + }() select { case <-c: return nil + case err := <-errc: + return err case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -2537,7 +2903,17 @@ func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) } -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) +var noBody io.ReadCloser = noBodyReader{} + +type noBodyReader struct{} + +func (noBodyReader) Close() error { return nil } +func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } + +type missingBody struct{} + +func (missingBody) Close() error { return nil } +func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } func strSliceContains(ss []string, s string) bool { for _, v := range ss { @@ -2584,87 +2960,6 @@ type errorReader struct{ err error } func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httpguts.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. 
- const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - if s.timer.Stop() { - s.resc <- nil - } - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. - return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 3849bc263..33f61398a 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -341,7 +341,12 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k = lowerHeader(k) + k, ascii := lowerHeader(k) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + continue + } if !validWireHeaderFieldName(k) { // Skip it as backup paranoia. Per // golang.org/issue/14048, these should diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index f24d2b1e7..c7cd00173 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -32,7 +32,8 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. No frames should be discarded except by CloseStream. + // order they are Push'd, except RST_STREAM frames. No frames should be + // discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -52,6 +53,7 @@ type FrameWriteRequest struct { // stream is the stream on which this frame will be written. // nil for non-stream frames like PING and SETTINGS. + // nil for RST_STREAM streams, which use the StreamError.StreamID field instead. stream *stream // done, if non-nil, must be a buffered channel with space for diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 2618b2c11..0a242c669 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode - if id := wr.StreamID(); id == 0 { + if wr.isControl() { n = &ws.root } else { + id := wr.StreamID() n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. 
In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. + // DATA frame. In other case, we push wr onto the root, rather + // than creating a new priorityNode. if wr.DataSize() > 0 { panic("add DATA on non-open stream") } diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 9a7b9e581..f2e55e05c 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP } func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { + if wr.isControl() { ws.zero.push(wr) return } + id := wr.StreamID() q, ok := ws.sq[id] if !ok { q = ws.queuePool.get() @@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { } func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. + // Control and RST_STREAM frames first. if !ws.zero.empty() { return ws.zero.shift(), true } diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go new file mode 100644 index 000000000..c5c4338db --- /dev/null +++ b/vendor/golang.org/x/net/idna/go118.go @@ -0,0 +1,14 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package idna + +// Transitional processing is disabled by default in Go 1.18. +// https://golang.org/issue/47510 +const transitionalLookup = false diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 7e69ee1b2..64ccf85fe 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -59,23 +59,22 @@ type Option func(*options) // Transitional sets a Profile to use the Transitional mapping as defined in UTS // #46. This will cause, for example, "ß" to be mapped to "ss". Using the // transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This +// compatibility. It is used by some browsers when resolving domain names. This // option is only meaningful if combined with MapForLookup. func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } + return func(o *options) { o.transitional = transitional } } // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts // are longer than allowed by the RFC. +// +// This option corresponds to the VerifyDnsLength flag in UTS #46. func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } // RemoveLeadingDots removes leading label separators. Leading runes that map to // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. 
func RemoveLeadingDots(remove bool) Option { return func(o *options) { o.removeLeadingDots = remove } } @@ -83,6 +82,8 @@ func RemoveLeadingDots(remove bool) Option { // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. +// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags +// in UTS #46. func ValidateLabels(enable bool) Option { return func(o *options) { // Don't override existing mappings, but set one that at least checks @@ -91,25 +92,48 @@ func ValidateLabels(enable bool) Option { o.mapping = normalize } o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode + o.checkJoiners = enable + o.checkHyphens = enable + if enable { + o.fromPuny = validateFromPunycode + } else { + o.fromPuny = nil + } + } +} + +// CheckHyphens sets whether to check for correct use of hyphens ('-') in +// labels. Most web browsers do not have this option set, since labels such as +// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use. +// +// This option corresponds to the CheckHyphens flag in UTS #46. +func CheckHyphens(enable bool) Option { + return func(o *options) { o.checkHyphens = enable } +} + +// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix +// A of RFC 5892, concerning the use of joiner runes. +// +// This option corresponds to the CheckJoiners flag in UTS #46. +func CheckJoiners(enable bool) Option { + return func(o *options) { + o.trie = trie + o.checkJoiners = enable } } // StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// hyphen). This is set by default for MapForLookup and ValidateForRegistration, +// but is only useful if ValidateLabels is set. // // This option is useful, for instance, for browsers that allow characters // outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. +// http://www.rfc-editor.org/std/std3.txt for more details. +// +// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46. func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } + return func(o *options) { o.useSTD3Rules = use } } // NOTE: the following options pull in tables. The tables should not be linked @@ -117,6 +141,8 @@ func StrictDomainName(use bool) Option { // BidiRule enables the Bidi rule as defined in RFC 5893. Any application // that relies on proper validation of labels should include this rule. +// +// This option corresponds to the CheckBidi flag in UTS #46. 
func BidiRule() Option { return func(o *options) { o.bidirule = bidirule.ValidString } } @@ -152,7 +178,8 @@ func MapForLookup() Option { type options struct { transitional bool useSTD3Rules bool - validateLabels bool + checkHyphens bool + checkJoiners bool verifyDNSLength bool removeLeadingDots bool @@ -225,8 +252,11 @@ func (p *Profile) String() string { if p.useSTD3Rules { s += ":UseSTD3Rules" } - if p.validateLabels { - s += ":ValidateLabels" + if p.checkHyphens { + s += ":CheckHyphens" + } + if p.checkJoiners { + s += ":CheckJoiners" } if p.verifyDNSLength { s += ":VerifyDNSLength" @@ -254,26 +284,29 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: transitionalLookup, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, - validateLabels: true, verifyDNSLength: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateRegistration, @@ -340,7 +373,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { } isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight labels.set(u) - if err == nil && p.validateLabels { + if err == nil && p.fromPuny != nil { err = p.fromPuny(p, u) } if err == nil { @@ -681,16 +714,18 @@ func (p *Profile) validateLabel(s string) (err error) { } return nil } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} + if p.checkHyphens { + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} + if !p.checkJoiners { + return nil } + trie := p.trie // p.checkJoiners is only set if trie is set. // TODO: merge the use of this in the trie. v, sz := trie.lookupString(s) x := info(v) diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index 7c7456374..aae6aac87 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -58,23 +58,22 @@ type Option func(*options) // Transitional sets a Profile to use the Transitional mapping as defined in UTS // #46. This will cause, for example, "ß" to be mapped to "ss". Using the // transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This +// compatibility. It is used by some browsers when resolving domain names. This // option is only meaningful if combined with MapForLookup. 
func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } + return func(o *options) { o.transitional = transitional } } // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts // are longer than allowed by the RFC. +// +// This option corresponds to the VerifyDnsLength flag in UTS #46. func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } // RemoveLeadingDots removes leading label separators. Leading runes that map to // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. func RemoveLeadingDots(remove bool) Option { return func(o *options) { o.removeLeadingDots = remove } } @@ -82,6 +81,8 @@ func RemoveLeadingDots(remove bool) Option { // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. +// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags +// in UTS #46. func ValidateLabels(enable bool) Option { return func(o *options) { // Don't override existing mappings, but set one that at least checks @@ -90,25 +91,48 @@ func ValidateLabels(enable bool) Option { o.mapping = normalize } o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode + o.checkJoiners = enable + o.checkHyphens = enable + if enable { + o.fromPuny = validateFromPunycode + } else { + o.fromPuny = nil + } + } +} + +// CheckHyphens sets whether to check for correct use of hyphens ('-') in +// labels. Most web browsers do not have this option set, since labels such as +// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use. +// +// This option corresponds to the CheckHyphens flag in UTS #46. +func CheckHyphens(enable bool) Option { + return func(o *options) { o.checkHyphens = enable } +} + +// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix +// A of RFC 5892, concerning the use of joiner runes. +// +// This option corresponds to the CheckJoiners flag in UTS #46. +func CheckJoiners(enable bool) Option { + return func(o *options) { + o.trie = trie + o.checkJoiners = enable } } // StrictDomainName limits the set of permissable ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// hyphen). This is set by default for MapForLookup and ValidateForRegistration, +// but is only useful if ValidateLabels is set. // // This option is useful, for instance, for browsers that allow characters // outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. +// http://www.rfc-editor.org/std/std3.txt for more details. +// +// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46. func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } + return func(o *options) { o.useSTD3Rules = use } } // NOTE: the following options pull in tables. The tables should not be linked @@ -116,6 +140,8 @@ func StrictDomainName(use bool) Option { // BidiRule enables the Bidi rule as defined in RFC 5893. 
Any application // that relies on proper validation of labels should include this rule. +// +// This option corresponds to the CheckBidi flag in UTS #46. func BidiRule() Option { return func(o *options) { o.bidirule = bidirule.ValidString } } @@ -152,7 +178,8 @@ func MapForLookup() Option { type options struct { transitional bool useSTD3Rules bool - validateLabels bool + checkHyphens bool + checkJoiners bool verifyDNSLength bool removeLeadingDots bool @@ -225,8 +252,11 @@ func (p *Profile) String() string { if p.useSTD3Rules { s += ":UseSTD3Rules" } - if p.validateLabels { - s += ":ValidateLabels" + if p.checkHyphens { + s += ":CheckHyphens" + } + if p.checkJoiners { + s += ":CheckJoiners" } if p.verifyDNSLength { s += ":VerifyDNSLength" @@ -255,9 +285,10 @@ var ( punycode = &Profile{} lookup = &Profile{options{ transitional: true, - useSTD3Rules: true, - validateLabels: true, removeLeadingDots: true, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, @@ -265,8 +296,9 @@ var ( }} display = &Profile{options{ useSTD3Rules: true, - validateLabels: true, removeLeadingDots: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, @@ -274,8 +306,9 @@ var ( }} registration = &Profile{options{ useSTD3Rules: true, - validateLabels: true, verifyDNSLength: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateRegistration, @@ -339,7 +372,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { continue } labels.set(u) - if err == nil && p.validateLabels { + if err == nil && p.fromPuny != nil { err = p.fromPuny(p, u) } if err == nil { @@ -629,16 +662,18 @@ func (p *Profile) validateLabel(s string) error { if p.bidirule != nil && !p.bidirule(s) { return &labelError{s, "B"} } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} + if p.checkHyphens { + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} + if !p.checkJoiners { + return nil } + trie := p.trie // p.checkJoiners is only set if trie is set. // TODO: merge the use of this in the trie. v, sz := trie.lookupString(s) x := info(v) diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go new file mode 100644 index 000000000..3aaccab1c --- /dev/null +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -0,0 +1,12 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.18 +// +build !go1.18 + +package idna + +const transitionalLookup = true diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go index 02c7d59af..e8e3ac11a 100644 --- a/vendor/golang.org/x/net/idna/punycode.go +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -49,6 +49,7 @@ func decode(encoded string) (string, error) { } } i, n, bias := int32(0), initialN, initialBias + overflow := false for pos < len(encoded) { oldI, w := i, int32(1) for k := base; ; k += base { @@ -60,29 +61,32 @@ func decode(encoded string) (string, error) { return "", punyError(encoded) } pos++ - i += digit * w - if i < 0 { + i, overflow = madd(i, digit, w) + if overflow { return "", punyError(encoded) } t := k - bias - if t < tmin { + if k <= bias { t = tmin - } else if t > tmax { + } else if k >= bias+tmax { t = tmax } if digit < t { break } - w *= base - t - if w >= math.MaxInt32/base { + w, overflow = madd(0, w, base-t) + if overflow { return "", punyError(encoded) } } + if len(output) >= 1024 { + return "", punyError(encoded) + } x := int32(len(output) + 1) bias = adapt(i-oldI, x, oldI == 0) n += i / x i %= x - if n > utf8.MaxRune || len(output) >= 1024 { + if n < 0 || n > utf8.MaxRune { return "", punyError(encoded) } output = append(output, 0) @@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) { if b > 0 { output = append(output, '-') } + overflow := false for remaining != 0 { m := int32(0x7fffffff) for _, r := range s { @@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) { m = r } } - delta += (m - n) * (h + 1) - if delta < 0 { + delta, overflow = madd(delta, m-n, h+1) + if overflow { return "", punyError(s) } n = m @@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) { q := delta for k := base; ; k += base { t := k - bias - if t < tmin { + if k <= bias { t = tmin - } else if t > tmax { + } else if k >= bias+tmax { t = tmax } if q < t { @@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) { return string(output), nil } +// madd computes a + (b * c), detecting overflow. +func madd(a, b, c int32) (next int32, overflow bool) { + p := int64(b) * int64(c) + if p > math.MaxInt32-int64(a) { + return 0, true + } + return a + int32(p), false +} + func decodeDigit(x byte) (digit int32, ok bool) { switch { case '0' <= x && x <= '9': diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 7a8cf889b..9c070a44b 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. 
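
The idna hunks above replace the single ValidateLabels switch with separate CheckHyphens and CheckJoiners options, make Transitional honor its boolean argument, and key the transitional lookup default off Go 1.18 via the new transitionalLookup constant. A minimal usage sketch under those assumptions — the option and method names are taken from the hunks above; the sample domain and the printed result are illustrative only:

	package main

	import (
		"fmt"

		"golang.org/x/net/idna"
	)

	func main() {
		// Build a lookup profile with the per-flag options that replace
		// the old all-or-nothing ValidateLabels behaviour.
		p := idna.New(
			idna.MapForLookup(),
			idna.CheckHyphens(true),
			idna.CheckJoiners(true),
			idna.Transitional(false), // matches the Go 1.18 default in go118.go above
		)
		a, err := p.ToASCII("bücher.example")
		fmt.Println(a, err) // typically "xn--bcher-kva.example <nil>"
	}
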
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 9857fe53d..4c0850a45 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -8,22 +8,35 @@ package errgroup import ( "context" + "fmt" "sync" ) +type token struct{} + // A Group is a collection of goroutines working on subtasks that are part of // the same overall task. // -// A zero Group is valid and does not cancel on error. +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. type Group struct { cancel func() wg sync.WaitGroup + sem chan token + errOnce sync.Once err error } +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + // WithContext returns a new Group and an associated Context derived from ctx. // // The derived Context is canceled the first time a function passed to Go @@ -45,14 +58,48 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. // // The first call to return a non-nil error cancels the group; its error will be // returned by Wait. func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + g.wg.Add(1) go func() { - defer g.wg.Done() + defer g.done() if err := f(); err != nil { g.errOnce.Do(func() { @@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) { }) } }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index 30f632c57..000000000 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. 
-} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. -// The callers can request access with a given weight. -type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. -func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - isFront := s.waiters.Front() == elem - s.waiters.Remove(elem) - // If we're at the front and there're extra tokens left, notify other waiters. - if isFront && s.size > s.cur { - s.notifyWaiters() - } - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. -func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: released more than held") - } - s.notifyWaiters() - s.mu.Unlock() -} - -func (s *Weighted) notifyWaiters() { - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. 
- break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } -} diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go index dcbb14ef3..271055be0 100644 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -46,6 +46,7 @@ func hostByteOrder() byteOrder { case "386", "amd64", "amd64p32", "alpha", "arm", "arm64", + "loong64", "mipsle", "mips64le", "mips64p32le", "nios2", "ppc64le", diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index abbec2d44..83f112c4c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -56,6 +56,7 @@ var X86 struct { HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes HasERMS bool // Enhanced REP for MOVSB and STOSB HasFMA bool // Fused-multiply-add instructions HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. @@ -105,8 +106,8 @@ var ARM64 struct { // ARM contains the supported CPU features of the current ARM (32-bit) platform. // All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. var ARM struct { _ CacheLinePad HasSWP bool // SWP instruction support diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 3298a87e9..fa7cdb9bc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -15,7 +15,3 @@ func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) // xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func xgetbv() (eax, edx uint32) - -// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo_x86.go for gccgo. -func darwinSupportsAVX512() bool diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index e363c7d13..a4605e6d1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -7,6 +7,7 @@ #include #include +#include // Need to wrap __get_cpuid_count because it's declared as static. int @@ -17,27 +18,21 @@ gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); } +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + // xgetbv reads the contents of an XCR (Extended Control Register) // specified in the ECX register into registers EDX:EAX. // Currently, the only supported value for XCR is 0. -// -// TODO: Replace with a better alternative: -// -// #include -// -// #pragma GCC target("xsave") -// -// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { -// unsigned long long x = _xgetbv(0); -// *eax = x & 0xffffffff; -// *edx = (x >> 32) & 0xffffffff; -// } -// -// Note that _xgetbv is defined starting with GCC 8. 
void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; } + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000..0f57b05bd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 +// +build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 54ca4667f..f5aacfc82 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -39,6 +39,7 @@ func initOptions() { {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, {Name: "erms", Feature: &X86.HasERMS}, {Name: "fma", Feature: &X86.HasFMA}, {Name: "osxsave", Feature: &X86.HasOSXSAVE}, @@ -73,6 +74,7 @@ func archInit() { X86.HasPCLMULQDQ = isSet(1, ecx1) X86.HasSSSE3 = isSet(9, ecx1) X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) X86.HasSSE41 = isSet(19, ecx1) X86.HasSSE42 = isSet(20, ecx1) X86.HasPOPCNT = isSet(23, ecx1) @@ -88,9 +90,10 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Check darwin commpage for AVX512 support. Necessary because: - // https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201 - osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() + // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. + // Since users can't rely on mask register contents, let's not advertise AVX-512 support. + // See issue 49233. + osSupportsAVX512 = false } else { // Check if OPMASK and ZMM registers have OS support. 
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s index b748ba52f..39acab2ff 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s @@ -26,27 +26,3 @@ TEXT ·xgetbv(SB),NOSPLIT,$0-8 MOVL AX, eax+0(FP) MOVL DX, edx+4(FP) RET - -// func darwinSupportsAVX512() bool -TEXT ·darwinSupportsAVX512(SB), NOSPLIT, $0-1 - MOVB $0, ret+0(FP) // default to false -#ifdef GOOS_darwin // return if not darwin -#ifdef GOARCH_amd64 // return if not amd64 -// These values from: -// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h -#define commpage64_base_address 0x00007fffffe00000 -#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) -#define commpage64_version (commpage64_base_address+0x01E) -#define hasAVX512F 0x0000004000000000 - MOVQ $commpage64_version, BX - CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13 - JL no_avx512 - MOVQ $commpage64_cpu_capabilities64, BX - MOVQ $hasAVX512F, CX - TESTQ (BX), CX - JZ no_avx512 - MOVB $1, ret+0(FP) -no_avx512: -#endif -#endif - RET diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index a864f24d7..96134157a 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -5,7 +5,7 @@ // Recreate a getsystemcfg syscall handler instead of // using the one provided by x/sys/unix to avoid having // the dependency between them. (See golang.org/issue/32102) -// Morever, this file will be used during the building of +// Moreover, this file will be used during the building of // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 474efad0e..7d3c060e1 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. -### mkmerge.go +### internal/mkmerge This program is used to extract duplicate const, func, and type declarations from the generated architecture-specific files listed below, and merge these diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s new file mode 100644 index 000000000..565357288 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && loong64 && gc +// +build linux +// +build loong64 +// +build gc + +#include "textflag.h" + + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. 
Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + RET diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 4362f47e2..b0f2bc4ae 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go new file mode 100644 index 000000000..15721a510 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -0,0 +1,142 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package unix + +import ( + "unsafe" +) + +// Helpers for dealing with ifreq since it contains a union and thus requires a +// lot of unsafe.Pointer casts to use properly. + +// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq +// contains an interface name and a union of arbitrary data which can be +// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq +// function. +// +// Use the Name method to access the stored interface name. The union data +// fields can be get and set using the following methods: +// - Uint16/SetUint16: flags +// - Uint32/SetUint32: ifindex, metric, mtu +type Ifreq struct{ raw ifreq } + +// NewIfreq creates an Ifreq with the input network interface name after +// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required) +// bytes. +func NewIfreq(name string) (*Ifreq, error) { + // Leave room for terminating NULL byte. + if len(name) >= IFNAMSIZ { + return nil, EINVAL + } + + var ifr ifreq + copy(ifr.Ifrn[:], name) + + return &Ifreq{raw: ifr}, nil +} + +// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc. + +// Name returns the interface name associated with the Ifreq. +func (ifr *Ifreq) Name() string { + return ByteSliceToString(ifr.raw.Ifrn[:]) +} + +// According to netdevice(7), only AF_INET addresses are returned for numerous +// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port +// field and other data is always empty. + +// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C +// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not +// AF_INET, an error is returned. 
+func (ifr *Ifreq) Inet4Addr() ([]byte, error) { + raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0])) + if raw.Family != AF_INET { + // Cannot safely interpret raw.Addr bytes as an IPv4 address. + return nil, EINVAL + } + + return raw.Addr[:], nil +} + +// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an +// embedded sockaddr within the Ifreq's union data. v must be 4 bytes in length +// or an error will be returned. +func (ifr *Ifreq) SetInet4Addr(v []byte) error { + if len(v) != 4 { + return EINVAL + } + + var addr [4]byte + copy(addr[:], v) + + ifr.clear() + *(*RawSockaddrInet4)( + unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]), + ) = RawSockaddrInet4{ + // Always set IP family as ioctls would require it anyway. + Family: AF_INET, + Addr: addr, + } + + return nil +} + +// Uint16 returns the Ifreq union data as a C short/Go uint16 value. +func (ifr *Ifreq) Uint16() uint16 { + return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) +} + +// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint16(v uint16) { + ifr.clear() + *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v +} + +// Uint32 returns the Ifreq union data as a C int/Go uint32 value. +func (ifr *Ifreq) Uint32() uint32 { + return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) +} + +// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint32(v uint32) { + ifr.clear() + *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v +} + +// clear zeroes the ifreq's union field to prevent trailing garbage data from +// being sent to the kernel if an ifreq is reused. +func (ifr *Ifreq) clear() { + for i := range ifr.raw.Ifru { + ifr.raw.Ifru[i] = 0 + } +} + +// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as +// IoctlGetEthtoolDrvinfo which use these APIs under the hood. + +// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData, +// use the Ifreq.withData method. +type ifreqData struct { + name [IFNAMSIZ]byte + // A type separate from ifreq is required in order to comply with the + // unsafe.Pointer rules since the "pointer-ness" of data would not be + // preserved if it were cast into the byte array of a raw ifreq. + data unsafe.Pointer + // Pad to the same size as ifreq. + _ [len(ifreq{}.Ifru) - SizeofPtr]byte +} + +// withData produces an ifreqData with the pointer p set for ioctls which require +// arbitrary pointer data. 
+func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData { + return ifreqData{ + name: ifr.raw.Ifrn, + data: p, + } +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 48773f730..884430b81 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -5,7 +5,6 @@ package unix import ( - "runtime" "unsafe" ) @@ -22,56 +21,42 @@ func IoctlRetInt(fd int, req uint) (int, error) { func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime - err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCTime(fd int, value *RTCTime) error { - err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value)) } func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm - err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { - err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -type ifreqEthtool struct { - name [IFNAMSIZ]byte - data unsafe.Pointer + return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value)) } // IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network // device specified by ifname. func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { - // Leave room for terminating NULL byte. - if len(ifname) >= IFNAMSIZ { - return nil, EINVAL + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err } - value := EthtoolDrvinfo{ - Cmd: ETHTOOL_GDRVINFO, - } - ifreq := ifreqEthtool{ - data: unsafe.Pointer(&value), - } - copy(ifreq.name[:], ifname) - err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) - runtime.KeepAlive(ifreq) + value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) return &value, err } @@ -80,7 +65,7 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { var value WatchdogInfo - err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value)) return &value, err } @@ -88,6 +73,7 @@ func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { // more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlWatchdogKeepalive(fd int) error { + // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr. return ioctl(fd, WDIOC_KEEPALIVE, 0) } @@ -95,9 +81,7 @@ func IoctlWatchdogKeepalive(fd int) error { // range of data conveyed in value to the file associated with the file // descriptor destFd. See the ioctl_ficlonerange(2) man page for details. 
func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { - err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value)) } // IoctlFileClone performs an FICLONE ioctl operation to clone the entire file @@ -148,7 +132,7 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { rawinfo.Reserved = value.Info[i].Reserved } - err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0])) // Output for i := range value.Info { @@ -166,31 +150,70 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { } func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { - err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value)) } func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { var value HIDRawDevInfo - err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value)) return &value, err } func IoctlHIDGetRawName(fd int) (string, error) { var value [_HIDIOCGRAWNAME_LEN]byte - err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawPhys(fd int) (string, error) { var value [_HIDIOCGRAWPHYS_LEN]byte - err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawUniq(fd int) (string, error) { var value [_HIDIOCGRAWUNIQ_LEN]byte - err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } + +// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or +// output. See the netdevice(7) man page for details. +func IoctlIfreq(fd int, req uint, value *Ifreq) error { + // It is possible we will add more fields to *Ifreq itself later to prevent + // misuse, so pass the raw *ifreq directly. + return ioctlPtr(fd, req, unsafe.Pointer(&value.raw)) +} + +// TODO(mdlayher): export if and when IfreqData is exported. + +// ioctlIfreqData performs an ioctl using an ifreqData structure for input +// and/or output. See the netdevice(7) man page for details. +func ioctlIfreqData(fd int, req uint, value *ifreqData) error { + // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are + // identical so pass *IfreqData directly. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an +// existing KCM socket, returning a structure containing the file descriptor of +// the new socket. +func IoctlKCMClone(fd int) (*KCMClone, error) { + var info KCMClone + if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil { + return nil, err + } + + return &info, nil +} + +// IoctlKCMAttach attaches a TCP socket and associated BPF program file +// descriptor to a multiplexor. +func IoctlKCMAttach(fd int, info KCMAttach) error { + return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info)) +} + +// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor. 
+func IoctlKCMUnattach(fd int, info KCMUnattach) error { + return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info)) +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 396aadf86..ee7362348 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 3f670faba..d888fb770 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -54,7 +54,7 @@ includes_AIX=' includes_Darwin=' #define _DARWIN_C_SOURCE -#define KERNEL +#define KERNEL 1 #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include @@ -75,6 +75,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -82,6 +83,9 @@ includes_Darwin=' #include #include #include + +// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk. +#define TIOCREMOTE 0x80047469 ' includes_DragonFly=' @@ -201,6 +205,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -210,6 +215,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -217,8 +223,6 @@ struct ltchars { #include #include #include -#include -#include #include #include #include @@ -229,13 +233,16 @@ struct ltchars { #include #include #include +#include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -257,6 +264,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -467,7 +475,6 @@ ccflags="$@" $2 !~ /^EQUIV_/ && $2 !~ /^EXPR_/ && $2 !~ /^EVIOC/ && - $2 !~ /^EV_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || $2 ~ /^(OLD|NEW)DEV$/ || @@ -499,10 +506,12 @@ ccflags="$@" $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || + $2 ~ /^KCM/ || + $2 ~ /^LANDLOCK_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || @@ -517,7 +526,7 @@ ccflags="$@" $2 ~ /^HW_MACHINE$/ || $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && - $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || @@ -563,6 +572,7 @@ ccflags="$@" $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /^AUDIT_RECORD_MAGIC/ && @@ -591,8 +601,10 @@ ccflags="$@" $2 ~ /^DEVLINK_/ || $2 ~ /^ETHTOOL_/ || $2 ~ /^LWTUNNEL_IP/ || + $2 ~ /^ITIMER_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || + $2 ~ /^P_/ || $2 ~/^PPPIOC/ || $2 ~ 
/^FAN_|FANOTIFY_/ || $2 == "HID_MAX_DESCRIPTOR_SIZE" || @@ -601,6 +613,8 @@ ccflags="$@" $2 ~ /^MTD/ || $2 ~ /^OTP/ || $2 ~ /^MEM/ || + $2 ~ /^WG/ || + $2 ~ /^FIB_RULE_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 8bf457059..5f63147e0 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -34,3 +34,52 @@ func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) return &ucred, nil } + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} + +// ParseOrigDstAddr decodes a socket control message containing the original +// destination address. To receive such a message the IP_RECVORIGDSTADDR or +// IPV6_RECVORIGDSTADDR option must be enabled on the socket. +func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) { + switch { + case m.Header.Level == SOL_IP && m.Header.Type == IP_ORIGDSTADDR: + pp := (*RawSockaddrInet4)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.Addr = pp.Addr + return sa, nil + + case m.Header.Level == SOL_IPV6 && m.Header.Type == IPV6_ORIGDSTADDR: + pp := (*RawSockaddrInet6)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + sa.Addr = pp.Addr + return sa, nil + + default: + return nil, EINVAL + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index d8efb715f..ad22c33db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -70,9 +72,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -85,9 +85,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) 
sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -219,18 +217,12 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { // Recvmsg not implemented on AIX - sa := new(SockaddrUnix) - return -1, -1, -1, sa, ENOSYS + return -1, -1, -1, ENOSYS } -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) - return -} - -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { +func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { // SendmsgN not implemented on AIX return -1, ENOSYS } @@ -261,9 +253,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -272,9 +262,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -314,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -385,6 +375,12 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + +func Fsync(fd int) error { + return fsyncRange(fd, O_SYNC, 0, 0) +} + /* * Direct access */ @@ -401,7 +397,6 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Fsync(fd int) (err error) // readdir_r //sysnb Getpgid(pid int) (pgid int, err error) @@ -462,8 +457,8 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = pread64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) //sysnb Setregid(rgid int, egid int) (err error) @@ -523,8 +518,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - 
p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } @@ -544,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 95ac3946b..9c87c5f07 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -163,9 +163,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -179,9 +177,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -210,9 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Nlen = sa.Nlen sa.raw.Alen = sa.Alen sa.raw.Slen = sa.Slen - for i := 0; i < len(sa.raw.Data); i++ { - sa.raw.Data[i] = sa.Data[i] - } + sa.raw.Data = sa.Data return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } @@ -228,9 +222,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Nlen = pp.Nlen sa.Alen = pp.Alen sa.Slen = pp.Slen - for i := 0; i < len(sa.Data); i++ { - sa.Data[i] = pp.Data[i] - } + sa.Data = pp.Data return sa, nil case AF_UNIX: @@ -262,9 +254,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -273,9 +263,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return anyToSockaddrGOOS(fd, rsa) @@ -337,10 +325,9 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) var iov Iovec if len(p) > 0 { @@ -364,29 +351,12 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Sendmsg(fd int, p, oob []byte, to 
Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) - return -} - -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) @@ -583,12 +553,7 @@ func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL } - // Darwin setattrlist can set nanosecond timestamps - err := setattrlistTimes(path, ts, 0) - if err != ENOSYS { - return err - } - err = utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) if err != ENOSYS { return err } @@ -608,10 +573,6 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { if len(ts) != 2 { return EINVAL } - err := setattrlistTimes(path, ts, flags) - if err != ENOSYS { - return err - } return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9945e5f96..4f87f16ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,6 +13,7 @@ package unix import ( + "fmt" "runtime" "syscall" "unsafe" @@ -47,6 +48,30 @@ func (sa *SockaddrCtl) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrCtl, nil } +// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets. +// SockaddrVM provides access to Darwin VM sockets: a mechanism that enables +// bidirectional communication between a hypervisor and its guest virtual +// machines. +type SockaddrVM struct { + // CID and Port specify a context ID and port address for a VM socket. + // Guests have a unique CID, and hosts may have a well-known CID of: + // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_LOCAL: refers to local communication (loopback). + // - VMADDR_CID_HOST: refers to other processes on the host. 
+ CID uint32 + Port uint32 + raw RawSockaddrVM +} + +func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Len = SizeofSockaddrVM + sa.raw.Family = AF_VSOCK + sa.raw.Port = sa.Port + sa.raw.Cid = sa.CID + + return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil +} + func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_SYSTEM: @@ -57,6 +82,13 @@ func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Unit = pp.Sc_unit return sa, nil } + case AF_VSOCK: + pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) + sa := &SockaddrVM{ + CID: pp.Cid, + Port: pp.Port, + } + return sa, nil } return nil, EAFNOSUPPORT } @@ -109,16 +141,6 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } -type attrList struct { - bitmapCount uint16 - _ uint16 - CommonAttr uint32 - VolAttr uint32 - DirAttr uint32 - FileAttr uint32 - Forkattr uint32 -} - //sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { @@ -127,8 +149,10 @@ func Pipe(p []int) (err error) { } var x [2]int32 err = pipe(&x) - p[0] = int(x[0]) - p[1] = int(x[1]) + if err == nil { + p[0] = int(x[0]) + p[1] = int(x[1]) + } return } @@ -248,36 +272,7 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { return flistxattr(fd, xattrPointer(dest), len(dest), 0) } -func setattrlistTimes(path string, times []Timespec, flags int) error { - _p0, err := BytePtrFromString(path) - if err != nil { - return err - } - - var attrList attrList - attrList.bitmapCount = ATTR_BIT_MAP_COUNT - attrList.CommonAttr = ATTR_CMN_MODTIME | ATTR_CMN_ACCTIME - - // order is mtime, atime: the opposite of Chtimes - attributes := [2]Timespec{times[1], times[0]} - options := 0 - if flags&AT_SYMLINK_NOFOLLOW != 0 { - options |= FSOPT_NOFOLLOW - } - return setattrlist( - _p0, - unsafe.Pointer(&attrList), - unsafe.Pointer(&attributes), - unsafe.Sizeof(attributes), - options) -} - -//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { - // Darwin doesn't support SYS_UTIMENSAT - return ENOSYS -} +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Wrapped @@ -398,8 +393,69 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + var kinfo KinfoProc + n := uintptr(SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofKinfoProc { + return nil, EIO + } + return &kinfo, nil +} + +func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + // Find size. 
+ n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) +//sys shmdt(addr uintptr) (err error) +//sys shmget(key int, size int, flag int) (id int, err error) + /* * Exposed directly */ @@ -455,11 +511,12 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) @@ -523,7 +580,6 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil @@ -557,10 +613,6 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { // Msgget // Msgsnd // Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget // Shm_open // Shm_unlink // Sem_open diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 5af108a50..61c0d0de1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -101,7 +101,10 @@ func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - p[0], p[1], err = pipe() + r, w, err := pipe() + if err == nil { + p[0], p[1] = r, w + } return } @@ -114,17 +117,22 @@ func Pipe2(p []int, flags int) (err error) { var pp [2]_C_int // pipe2 on dragonfly takes an fds array as an argument, but still // returns the file descriptors. 
- p[0], p[1], err = pipe2(&pp, flags) + r, w, err := pipe2(&pp, flags) + if err == nil { + p[0], p[1] = r, w + } return err } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pread(fd int, p []byte, offset int64) (n int, err error) { + +func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } @@ -163,11 +171,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 18c392cf3..6f6c510f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -110,8 +110,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -192,11 +194,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -636,8 +633,8 @@ func PtraceSingleStep(pid int) (err error) { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8c5357683..8d5f294c4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -162,6 +162,14 @@ func (l *Lifreq) GetLifruInt() int { return *(*int)(unsafe.Pointer(&l.Lifru[0])) } +func (l *Lifreq) SetLifruUint(d uint) { + *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruUint() uint { + return *(*uint)(unsafe.Pointer(&l.Lifru[0])) +} + func IoctlLifreq(fd int, req uint, l *Lifreq) error { return ioctl(fd, req, uintptr(unsafe.Pointer(l))) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 41b91fdfb..c8d203212 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,8 +13,8 @@ package unix import ( "encoding/binary" - "runtime" "syscall" + "time" 
"unsafe" ) @@ -38,6 +38,13 @@ func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } +func EpollCreate(size int) (fd int, err error) { + if size <= 0 { + return -1, EINVAL + } + return EpollCreate1(0) +} + //sys FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) //sys fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) @@ -66,11 +73,22 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { return fchmodat(dirfd, path, mode) } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. -// These are defined in ioctl.go and ioctl_linux.go. +// ioctl itself should not be exposed directly, but additional get/set functions +// for specific types are permissible. These are defined in ioctl.go and +// ioctl_linux.go. +// +// The third argument to ioctl is often a pointer but sometimes an integer. +// Callers should use ioctlPtr when the third argument is a pointer and ioctl +// when the third argument is an integer. +// +// TODO: some existing code incorrectly uses ioctl when it should use ioctlPtr. //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) @@ -102,6 +120,25 @@ func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { return openat2(dirfd, path, how, SizeofOpenHow) } +func Pipe(p []int) error { + return Pipe2(p, 0) +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -111,6 +148,15 @@ func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error return ppoll(&fds[0], len(fds), timeout, sigmask) } +func Poll(fds []PollFd, timeout int) (n int, err error) { + var ts *Timespec + if timeout >= 0 { + ts = new(Timespec) + *ts = NsecToTimespec(int64(timeout) * 1e6) + } + return Ppoll(fds, ts, nil) +} + //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) func Readlink(path string, buf []byte) (n int, err error) { @@ -161,27 +207,7 @@ func Utimes(path string, tv []Timeval) error { //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) func UtimesNano(path string, ts []Timespec) error { - if ts == nil { - err := utimensat(AT_FDCWD, path, nil, 0) - if err != ENOSYS { - return err - } - return utimes(path, nil) - } - if len(ts) != 2 { - return EINVAL - } - err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) - if err != ENOSYS { - return err - } - // If the utimensat syscall isn't available (utimensat was added to Linux - // in 2.6.22, Released, 8 July 2007) then fall back to utimes - var tv [2]Timeval - for i := 0; i < 2; i++ { - tv[i] = NsecToTimeval(TimespecToNsec(ts[i])) - } - return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) + return UtimesNanoAt(AT_FDCWD, path, ts, 0) } func UtimesNanoAt(dirfd int, 
path string, ts []Timespec, flags int) error { @@ -224,6 +250,13 @@ func Getwd() (wd string, err error) { if n < 1 || n > len(buf) || buf[n-1] != 0 { return "", EINVAL } + // In some cases, Linux can return a path that starts with the + // "(unreachable)" prefix, which can potentially be a valid relative + // path. To work around that, return ENOENT if path is not absolute. + if buf[0] != '/' { + return "", ENOENT + } + return string(buf[0 : n-1]), nil } @@ -333,6 +366,8 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, return } +//sys Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) + func Mkfifo(path string, mode uint32) error { return Mknod(path, mode|S_IFIFO, 0) } @@ -349,9 +384,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -364,9 +397,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -415,9 +446,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Hatype = sa.Hatype sa.raw.Pkttype = sa.Pkttype sa.raw.Halen = sa.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } @@ -483,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -527,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. 
// -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -603,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. -// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -618,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. // -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. 
-// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. type SockaddrALG struct { @@ -832,12 +861,10 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Addr == nil { return nil, 0, EINVAL } - sa.raw.Family = AF_TIPC sa.raw.Scope = int8(sa.Scope) sa.raw.Addrtype = sa.Addr.tipcAddrtype() sa.raw.Addr = sa.Addr.tipcAddr() - return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil } @@ -851,9 +878,7 @@ type SockaddrL2TPIP struct { func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET sa.raw.Conn_id = sa.ConnId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil } @@ -869,9 +894,7 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET6 sa.raw.Conn_id = sa.ConnId sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } @@ -967,9 +990,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Hatype = pp.Hatype sa.Pkttype = pp.Pkttype sa.Halen = pp.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_UNIX: @@ -1008,18 +1029,14 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) sa := new(SockaddrL2TPIP) sa.ConnId = pp.Conn_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1035,9 +1052,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrL2TPIP6) sa.ConnId = pp.Conn_id sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) @@ -1045,9 +1060,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1222,11 +1235,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { func Accept(fd int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny - 
// Try accept4 first for Android, then try accept for kernel older than 2.6.28 nfd, err = accept4(fd, &rsa, &len, 0) - if err == ENOSYS { - nfd, err = accept(fd, &rsa, &len) - } if err != nil { return } @@ -1348,6 +1357,13 @@ func SetsockoptTpacketReq3(fd, level, opt int, tp *TpacketReq3) error { return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp)) } +func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { + if len(o) == 0 { + return EINVAL + } + return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1483,10 +1499,9 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) var iov Iovec if len(p) > 0 { @@ -1517,28 +1532,10 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - var err error - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) @@ -1771,6 +1768,16 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri return mount(source, target, fstype, flags, datap) } +//sys mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) = SYS_MOUNT_SETATTR + +// MountSetattr is a wrapper for mount_setattr(2). +// https://man7.org/linux/man-pages/man2/mount_setattr.2.html +// +// Requires kernel >= 5.12. +func MountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr) error { + return mountSetattr(dirfd, pathname, flags, attr, unsafe.Sizeof(*attr)) +} + func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) @@ -1802,11 +1809,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Dup(oldfd int) (fd int, err error) func Dup2(oldfd, newfd int) error { - // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. 
- if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { - return Dup3(oldfd, newfd, 0) - } - return dup2(oldfd, newfd) + return Dup3(oldfd, newfd, 0) } //sys Dup3(oldfd int, newfd int, flags int) (err error) @@ -1826,6 +1829,9 @@ func Dup2(oldfd, newfd int) error { //sys Fremovexattr(fd int, attr string) (err error) //sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) +//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) +//sys Fsopen(fsName string, flags int) (fd int, err error) +//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1856,10 +1862,12 @@ func Getpgrp() (pid int) { //sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -2181,7 +2189,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid || isGroupMember(gid) { + if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 @@ -2294,11 +2302,65 @@ type RemoteIovec struct { //sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV //sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV +//sys PidfdOpen(pid int, flags int) (fd int, err error) = SYS_PIDFD_OPEN +//sys PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) = SYS_PIDFD_GETFD +//sys PidfdSendSignal(pidfd int, sig Signal, info *Siginfo, flags int) (err error) = SYS_PIDFD_SEND_SIGNAL + +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) +//sys shmdt(addr uintptr) (err error) +//sys shmget(key int, size int, flag int) (id int, err error) + +//sys getitimer(which int, currValue *Itimerval) (err error) +//sys setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) + +// MakeItimerval creates an Itimerval from interval and value durations. +func MakeItimerval(interval, value time.Duration) Itimerval { + return Itimerval{ + Interval: NsecToTimeval(interval.Nanoseconds()), + Value: NsecToTimeval(value.Nanoseconds()), + } +} + +// A value which may be passed to the which parameter for Getitimer and +// Setitimer. 
+type ItimerWhich int + +// Possible which values for Getitimer and Setitimer. +const ( + ItimerReal ItimerWhich = ITIMER_REAL + ItimerVirtual ItimerWhich = ITIMER_VIRTUAL + ItimerProf ItimerWhich = ITIMER_PROF +) + +// Getitimer wraps getitimer(2) to return the current value of the timer +// specified by which. +func Getitimer(which ItimerWhich) (Itimerval, error) { + var it Itimerval + if err := getitimer(int(which), &it); err != nil { + return Itimerval{}, err + } + + return it, nil +} + +// Setitimer wraps setitimer(2) to arm or disarm the timer specified by which. +// It returns the previous value of the timer. +// +// If the Itimerval argument is the zero value, the timer will be disarmed. +func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { + var prev Itimerval + if err := setitimer(int(which), &it, &prev); err != nil { + return Itimerval{}, err + } + + return prev, nil +} + /* * Unimplemented */ // AfsSyscall -// Alarm // ArchPrctl // Brk // ClockNanosleep @@ -2314,7 +2376,6 @@ type RemoteIovec struct { // GetMempolicy // GetRobustList // GetThreadArea -// Getitimer // Getpmsg // IoCancel // IoDestroy @@ -2375,10 +2436,6 @@ type RemoteIovec struct { // SetRobustList // SetThreadArea // SetTidAddress -// Shmat -// Shmctl -// Shmdt -// Shmget // Sigaltstack // Swapoff // Swapon @@ -2396,5 +2453,4 @@ type RemoteIovec struct { // Vfork // Vhangup // Vserver -// Waitid // _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index b430536c8..518e476e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -19,36 +19,8 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
-//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 @@ -59,13 +31,12 @@ func Pipe2(p []int, flags int) (err error) { //sysnb Geteuid() (euid int) = SYS_GETEUID32 //sysnb Getgid() (gid int) = SYS_GETGID32 //sysnb Getuid() (uid int) = SYS_GETUID32 -//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 @@ -105,7 +76,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -133,7 +104,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } @@ -202,14 +173,6 @@ const ( _SENDMMSG = 20 ) -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) - if e != 0 { - err = e - } - return -} - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) if e != 0 { @@ -381,12 +344,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go new file mode 100644 index 000000000..08086ac6a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) +// +build linux +// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 + +package unix + +// SYS_ALARM is not defined on arm or riscv, but is available for other GOARCH +// values. + +//sys Alarm(seconds uint) (remaining uint, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 85cd97da0..f5e9d6bef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -7,8 +7,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -21,17 +19,6 @@ package unix //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) -//sysnb inotifyInit() (fd int, err error) - -func InotifyInit() (fd int, err error) { - // First try inotify_init1, because Android's seccomp policy blocks the latter. - fd, err = InotifyInit1(0) - if err == ENOSYS { - fd, err = inotifyInit() - } - return -} - //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) @@ -41,9 +28,10 @@ func Lstat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) } +//sys MemfdSecret(flags int) (fd int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -75,7 +63,6 @@ func Stat(path string, stat *Stat_t) (err error) { //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -126,32 +113,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - func (r *PtraceRegs) PC() uint64 { return r.Rip } func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc } @@ -176,15 +137,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = 
uint64(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 39a864d4e..c1a7778f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -19,36 +19,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - // Try pipe2 first for Android O, then try pipe for kernel 2.6.23. - err = pipe2(&pp, 0) - if err == ENOSYS { - err = pipe(&pp) - } - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { @@ -57,7 +27,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return newoffset, nil } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -76,8 +45,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -86,7 +53,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sysnb Geteuid() (euid int) = SYS_GETEUID32 //sysnb Getgid() (gid int) = SYS_GETGID32 //sysnb Getuid() (uid int) = SYS_GETUID32 -//sysnb InotifyInit() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 @@ -130,8 +96,8 @@ func Utime(path string, buf *Utimbuf) error { //sys utimes(path string, times *[2]Timeval) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 @@ -184,7 +150,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource 
int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -212,7 +178,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } @@ -260,15 +226,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys armSyncFileRange(fd int, flags int, off int64, n int64) (err error) = SYS_ARM_SYNC_FILE_RANGE func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 7f27ebf2f..d83e2c657 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -9,13 +9,6 @@ package unix import "unsafe" -func EpollCreate(size int) (fd int, err error) { - if size <= 0 { - return -1, EINVAL - } - return EpollCreate1(0) -} - //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -29,8 +22,9 @@ func EpollCreate(size int) (fd int, err error) { //sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -73,7 +67,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return ENOSYS } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -145,33 +138,9 @@ func utimes(path string, tv *[2]Timeval) (err error) { return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) } -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - // Getrlimit prefers the prlimit64 system call. See issue 38604. 
func Getrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, nil, rlim) + err := Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -180,7 +149,7 @@ func Getrlimit(resource int, rlim *Rlimit) error { // Setrlimit prefers the prlimit64 system call. See issue 38604. func Setrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, rlim, nil) + err := Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } @@ -211,31 +180,11 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -func InotifyInit() (fd int, err error) { - return InotifyInit1(0) -} - -// dup2 exists because func Dup3 in syscall_linux.go references -// it in an unreachable path. dup2 isn't available on arm64. -func dup2(oldfd int, newfd int) error - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err } -func Poll(fds []PollFd, timeout int) (n int, err error) { - var ts *Timespec - if timeout >= 0 { - ts = new(Timespec) - *ts = NsecToTimespec(int64(timeout) * 1e6) - } - if len(fds) == 0 { - return ppoll(nil, 0, ts, nil) - } - return ppoll(&fds[0], len(fds), ts, nil) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000..0b69c3eff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, rlim, nil) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 27aee81d9..98a2660b9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -8,8 +8,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -23,8 +21,8 @@ package unix //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -50,7 +48,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -94,30 +91,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) 
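Editorial usage note (not part of the vendored diff): this hunk and the per-architecture hunks below demote Pread and Pwrite to the unexported pread and pwrite; the exported wrappers are added once in syscall_unix.go further down in this patch, where the race-detector bookkeeping now lives, so callers of golang.org/x/sys/unix are unaffected. A minimal usage sketch, assuming a readable file at the example path /etc/hosts:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts") // example path only
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// Read 16 bytes at offset 0 without moving the file offset.
	// unix.Pread forwards to the unexported per-arch pread and applies the
	// race-detector annotations in one shared place.
	buf := make([]byte, 16)
	n, err := unix.Pread(int(f.Fd()), buf, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}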
- -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - func Ioperm(from int, num int, on int) (err error) { return ENOSYS } @@ -220,16 +193,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } - -func InotifyInit() (fd int, err error) { - return InotifyInit1(0) -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 3a5621e37..b8a18c0ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,8 +15,6 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -27,8 +25,8 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Getuid() (uid int) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 @@ -43,7 +41,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -60,7 +57,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) @@ -113,29 +109,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] 
= int(pp[1]) - return -} - -//sysnb pipe() (p1 int, p2 int, err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - p[0], p[1], err = pipe() - return -} - //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { @@ -157,7 +130,7 @@ type rlimit32 struct { //sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -185,7 +158,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } @@ -232,12 +205,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index cf0d36f76..4ed9e67c6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,8 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux -// +build ppc +// +build linux,ppc package unix @@ -13,8 +12,6 @@ import ( "unsafe" ) -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -24,15 +21,14 @@ import ( //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) //sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 @@ -47,7 +43,6 @@ import ( //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, 
flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -143,7 +138,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -171,7 +166,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } @@ -219,41 +214,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 5259a5fea..db63d384c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -8,8 +8,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -22,15 +20,14 @@ package unix //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT //sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT @@ -48,7 +45,6 @@ package unix //sys Statfs(path string, buf *Statfs_t) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) 
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -104,41 +100,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 8ef821e5d..925a748a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -9,13 +9,6 @@ package unix import "unsafe" -func EpollCreate(size int) (fd int, err error) { - if size <= 0 { - return -1, EINVAL - } - return EpollCreate1(0) -} - //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -29,8 +22,9 @@ func EpollCreate(size int) (fd int, err error) { //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { @@ -72,7 +66,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return ENOSYS } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -144,30 +137,6 @@ func utimes(path string, tv *[2]Timeval) (err error) { return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) } -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp 
[2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } @@ -192,27 +161,11 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -func InotifyInit() (fd int, err error) { - return InotifyInit1(0) -} - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err } -func Poll(fds []PollFd, timeout int) (n int, err error) { - var ts *Timespec - if timeout >= 0 { - ts = new(Timespec) - *ts = NsecToTimespec(int64(timeout) * 1e6) - } - if len(fds) == 0 { - return ppoll(nil, 0, ts, nil) - } - return ppoll(&fds[0], len(fds), ts, nil) -} - func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) } @@ -229,7 +182,3 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } - -// dup2 exists because func Dup3 in syscall_linux.go references -// it in an unreachable path. dup2 isn't available on arm64. -func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index a1c0574b5..6fcf277b0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -11,8 +11,6 @@ import ( "unsafe" ) -//sys dup2(oldfd int, newfd int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -25,12 +23,11 @@ import ( //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) @@ -77,30 +74,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) // pipe2 is the same as pipe when flags are set to 0. 
- p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - func Ioperm(from int, num int, on int) (err error) { return ENOSYS } @@ -172,15 +145,6 @@ const ( netSendMMsg = 20 ) -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (int, error) { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} - fd, _, err := Syscall(SYS_SOCKETCALL, netAccept, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(fd), nil -} - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (int, error) { args := [4]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)} fd, _, err := Syscall(SYS_SOCKETCALL, netAccept4, uintptr(unsafe.Pointer(&args)), 0) @@ -324,15 +288,6 @@ func Shutdown(s, how int) error { return nil } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index de14b8898..02a45d9cc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -9,7 +9,6 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -20,13 +19,12 @@ package unix //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) @@ -44,7 +42,6 @@ package unix //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, 
addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -119,38 +116,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } - -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 853d5f0f4..666f0a1b3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,14 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (fd1 int, fd2 int, err error) - func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - p[0], p[1], err = pipe() - return + return Pipe2(p, 0) } //sysnb pipe2(p *[2]_C_int, flags int) (err error) @@ -128,8 +122,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -167,11 +163,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return -1, ENOSYS } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -317,8 +308,8 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 22b550385..78daceb33 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,18 +81,22 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, 
buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -147,11 +151,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -272,8 +271,8 @@ func Uname(uname *Utsname) error { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 77fcde7c1..932996c75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,7 +13,10 @@ package unix import ( + "fmt" + "os" "runtime" + "sync" "syscall" "unsafe" ) @@ -63,8 +66,10 @@ func Pipe(p []int) (err error) { if n != 0 { return err } - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return nil } @@ -76,8 +81,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -89,9 +96,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -104,9 +109,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -414,9 +417,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -425,9 +426,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -452,10 +451,9 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, p, oob []byte, 
flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) var iov Iovec if len(p) > 0 { @@ -477,29 +475,12 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } oobn = int(msg.Accrightslen) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) @@ -637,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) @@ -662,8 +644,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) @@ -744,3 +726,280 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } + +// Event Ports + +type fileObjCookie struct { + fobj *fileObj + cookie interface{} +} + +// EventPort provides a safe abstraction on top of Solaris/illumos Event Ports. +type EventPort struct { + port int + mu sync.Mutex + fds map[uintptr]*fileObjCookie + paths map[string]*fileObjCookie + // The user cookie presents an interesting challenge from a memory management perspective. + // There are two paths by which we can discover that it is no longer in use: + // 1. The user calls port_dissociate before any events fire + // 2. An event fires and we return it to the user + // The tricky situation is if the event has fired in the kernel but + // the user hasn't requested/received it yet. + // If the user wants to port_dissociate before the event has been processed, + // we should handle things gracefully. 
To do so, we need to keep an extra + // reference to the cookie around until the event is processed + // thus the otherwise seemingly extraneous "cookies" map + // The key of this map is a pointer to the corresponding &fCookie.cookie + cookies map[*interface{}]*fileObjCookie +} + +// PortEvent is an abstraction of the port_event C struct. +// Compare Source against PORT_SOURCE_FILE or PORT_SOURCE_FD +// to see if Path or Fd was the event source. The other will be +// uninitialized. +type PortEvent struct { + Cookie interface{} + Events int32 + Fd uintptr + Path string + Source uint16 + fobj *fileObj +} + +// NewEventPort creates a new EventPort including the +// underlying call to port_create(3c). +func NewEventPort() (*EventPort, error) { + port, err := port_create() + if err != nil { + return nil, err + } + e := &EventPort{ + port: port, + fds: make(map[uintptr]*fileObjCookie), + paths: make(map[string]*fileObjCookie), + cookies: make(map[*interface{}]*fileObjCookie), + } + return e, nil +} + +//sys port_create() (n int, err error) +//sys port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) +//sys port_dissociate(port int, source int, object uintptr) (n int, err error) +//sys port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) +//sys port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) + +// Close closes the event port. +func (e *EventPort) Close() error { + e.mu.Lock() + defer e.mu.Unlock() + err := Close(e.port) + if err != nil { + return err + } + e.fds = nil + e.paths = nil + return nil +} + +// PathIsWatched checks to see if path is associated with this EventPort. +func (e *EventPort) PathIsWatched(path string) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.paths[path] + return found +} + +// FdIsWatched checks to see if fd is associated with this EventPort. +func (e *EventPort) FdIsWatched(fd uintptr) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.fds[fd] + return found +} + +// AssociatePath wraps port_associate(3c) for a filesystem path including +// creating the necessary file_obj from the provided stat information. +func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.paths[path]; found { + return fmt.Errorf("%v is already associated with this Event Port", path) + } + fobj, err := createFileObj(path, stat) + if err != nil { + return err + } + fCookie := &fileObjCookie{fobj, cookie} + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + if err != nil { + return err + } + e.paths[path] = fCookie + e.cookies[&fCookie.cookie] = fCookie + return nil +} + +// DissociatePath wraps port_dissociate(3c) for a filesystem path. +func (e *EventPort) DissociatePath(path string) error { + e.mu.Lock() + defer e.mu.Unlock() + f, ok := e.paths[path] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", path) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(f.fobj))) + // If the path is no longer associated with this event port (ENOENT) + // we should delete it from our map. We can still return ENOENT to the caller. 
+ // But we need to save the cookie + if err != nil && err != ENOENT { + return err + } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.paths[path] + delete(e.cookies, &fCookie.cookie) + } + delete(e.paths, path) + return err +} + +// AssociateFd wraps calls to port_associate(3c) on file descriptors. +func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.fds[fd]; found { + return fmt.Errorf("%v is already associated with this Event Port", fd) + } + fCookie := &fileObjCookie{nil, cookie} + _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + if err != nil { + return err + } + e.fds[fd] = fCookie + e.cookies[&fCookie.cookie] = fCookie + return nil +} + +// DissociateFd wraps calls to port_dissociate(3c) on file descriptors. +func (e *EventPort) DissociateFd(fd uintptr) error { + e.mu.Lock() + defer e.mu.Unlock() + _, ok := e.fds[fd] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", fd) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FD, fd) + if err != nil && err != ENOENT { + return err + } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.fds[fd] + delete(e.cookies, &fCookie.cookie) + } + delete(e.fds, fd) + return err +} + +func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { + fobj := new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fobj.Atim.Sec = s.Atim.Sec + fobj.Atim.Nsec = s.Atim.Nsec + fobj.Mtim.Sec = s.Mtim.Sec + fobj.Mtim.Nsec = s.Mtim.Nsec + fobj.Ctim.Sec = s.Ctim.Sec + fobj.Ctim.Nsec = s.Ctim.Nsec + return fobj, nil +} + +// GetOne wraps port_get(3c) and returns a single PortEvent. +func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { + pe := new(portEvent) + _, err := port_get(e.port, pe, t) + if err != nil { + return nil, err + } + p := new(PortEvent) + e.mu.Lock() + defer e.mu.Unlock() + e.peIntToExt(pe, p) + return p, nil +} + +// peIntToExt converts a cgo portEvent struct into the friendlier PortEvent +// NOTE: Always call this function while holding the e.mu mutex +func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) { + peExt.Events = peInt.Events + peExt.Source = peInt.Source + cookie := (*interface{})(unsafe.Pointer(peInt.User)) + peExt.Cookie = *cookie + switch peInt.Source { + case PORT_SOURCE_FD: + delete(e.cookies, cookie) + peExt.Fd = uintptr(peInt.Object) + // Only remove the fds entry if it exists and this cookie matches + if fobj, ok := e.fds[peExt.Fd]; ok { + if &fobj.cookie == cookie { + delete(e.fds, peExt.Fd) + } + } + case PORT_SOURCE_FILE: + if fCookie, ok := e.cookies[cookie]; ok && uintptr(unsafe.Pointer(fCookie.fobj)) == uintptr(peInt.Object) { + // Use our stashed reference rather than using unsafe on what we got back + // the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object))) + peExt.fobj = fCookie.fobj + } else { + panic("mismanaged memory") + } + delete(e.cookies, cookie) + peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name))) + // Only remove the paths entry if it exists and this cookie matches + if fobj, ok := e.paths[peExt.Path]; ok { + if &fobj.cookie == cookie { + delete(e.paths, peExt.Path) + } + } + } +} + +// Pending wraps port_getn(3c) and returns how many events are pending. 
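Editorial usage note (not part of the vendored diff): the comments above spell out the cookie bookkeeping behind the new EventPort type. As an illustrative sketch under stated assumptions, a Solaris/illumos program watching a single path for changes could use the API roughly as follows; the watched path and the FILE_MODIFIED event mask are assumptions for the example.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	port, err := unix.NewEventPort() // wraps port_create(3c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer port.Close()

	path := "/tmp/watched" // hypothetical path
	st, err := os.Stat(path)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Associate the path; the last argument is the user cookie handed back
	// with the event. FILE_MODIFIED is assumed to be defined for this GOOS.
	if err := port.AssociatePath(path, st, unix.FILE_MODIFIED, "my-cookie"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Block until one event fires; a nil timeout waits indefinitely.
	ev, err := port.GetOne(nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("source=%d path=%q cookie=%v\n", ev.Source, ev.Path, ev.Cookie)
}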
+func (e *EventPort) Pending() (int, error) { + var n uint32 = 0 + _, err := port_getn(e.port, nil, 0, &n, nil) + return int(n), err +} + +// Get wraps port_getn(3c) and fills a slice of PortEvent. +// It will block until either min events have been received +// or the timeout has been exceeded. It will return how many +// events were actually received along with any error information. +func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) { + if min == 0 { + return 0, fmt.Errorf("need to request at least one event or use Pending() instead") + } + if len(s) < min { + return 0, fmt.Errorf("len(s) (%d) is less than min events requested (%d)", len(s), min) + } + got := uint32(min) + max := uint32(len(s)) + var err error + ps := make([]portEvent, max, max) + _, err = port_getn(e.port, &ps[0], max, &got, timeout) + // got will be trustworthy with ETIME, but not any other error. + if err != nil && err != ETIME { + return 0, err + } + e.mu.Lock() + defer e.mu.Unlock() + for i := 0; i < int(got); i++ { + e.peIntToExt(&ps[i], &s[i]) + } + return int(got), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index a7618ceb5..70508afc1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -177,6 +177,30 @@ func Write(fd int, p []byte) (n int, err error) { return } +func Pread(fd int, p []byte, offset int64) (n int, err error) { + n, err = pread(fd, p, offset) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwrite(fd, p, offset) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
var SocketDisableIPv6 bool @@ -313,6 +337,37 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, p, oob, ptr, salen, flags) +} + +func Send(s int, buf []byte, flags int) (err error) { + return sendto(s, buf, flags, nil, 0) +} + func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { ptr, n, err := to.sockaddr() if err != nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 1ffd8bfcf..f8616f454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -67,9 +67,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -83,9 +81,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -144,9 +140,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -155,9 +149,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -587,8 +579,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go new file mode 100644 index 000000000..2c3a4437f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package unix + +import "runtime" + +// SysvShmCtl performs control operations on the shared memory segment +// specified by id. 
+func SysvShmCtl(id, cmd int, desc *SysvShmDesc) (result int, err error) { + if runtime.GOARCH == "arm" || + runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" { + cmd |= ipc_64 + } + + return shmctl(id, cmd, desc) +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go new file mode 100644 index 000000000..0bb4c8de5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -0,0 +1,61 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin && !ios) || linux +// +build darwin,!ios linux + +package unix + +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +// SysvShmAttach attaches the Sysv shared memory segment associated with the +// shared memory identifier id. +func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) { + addr, errno := shmat(id, addr, flag) + if errno != nil { + return nil, errno + } + + // Retrieve the size of the shared memory to enable slice creation + var info SysvShmDesc + + _, err := SysvShmCtl(id, IPC_STAT, &info) + if err != nil { + // release the shared memory if we can't find the size + + // ignoring error from shmdt as there's nothing sensible to return here + shmdt(addr) + return nil, err + } + + // Use unsafe to convert addr into a []byte. + // TODO: convert to unsafe.Slice once we can assume Go 1.17 + var b []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) + hdr.Data = unsafe.Pointer(addr) + hdr.Cap = int(info.Segsz) + hdr.Len = int(info.Segsz) + return b, nil +} + +// SysvShmDetach unmaps the shared memory slice returned from SysvShmAttach. +// +// It is not safe to use the slice after calling this function. +func SysvShmDetach(data []byte) error { + if len(data) == 0 { + return EINVAL + } + + return shmdt(uintptr(unsafe.Pointer(&data[0]))) +} + +// SysvShmGet returns the Sysv shared memory identifier associated with key. +// If the IPC_CREAT flag is specified a new segment is created. +func SysvShmGet(key, size, flag int) (id int, err error) { + return shmget(key, size, flag) +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go new file mode 100644 index 000000000..71bddefdb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && !ios +// +build darwin,!ios + +package unix + +// SysvShmCtl performs control operations on the shared memory segment +// specified by id. 
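Editorial usage note (not part of the vendored diff): a sketch of how the new SysvShm helpers above compose on Linux (or Darwin, non-iOS). The 4 KiB size, 0o600 permissions and IPC flags are example values.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Create a private 4 KiB segment (size and flags are example values).
	id, err := unix.SysvShmGet(unix.IPC_PRIVATE, 4096, unix.IPC_CREAT|0o600)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Attach it; the returned slice is backed by the segment and its length
	// comes from the IPC_STAT performed inside SysvShmAttach.
	mem, err := unix.SysvShmAttach(id, 0, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	copy(mem, []byte("hello"))

	// Detach when done; the slice must not be used afterwards.
	if err := unix.SysvShmDetach(mem); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Mark the segment for removal once all attachments are gone.
	if _, err := unix.SysvShmCtl(id, unix.IPC_RMID, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("done")
}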
+func SysvShmCtl(id, cmd int, desc *SysvShmDesc) (result int, err error) { + return shmctl(id, cmd, desc) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 991996b60..476a1c7e7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -12,1550 +12,1582 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - AF_VSOCK = 0x28 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x51c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - 
B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - 
DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x10a - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_DARWIN = 0x10a - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x11 - EVFILT_THREADMARKER = 0x11 - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - 
EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - FSOPT_RETURN_REALDEV = 0x200 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_INFO = 0x67 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDFILESUPPL = 0x68 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPATH_NOFIRMLINK = 0x66 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GETSIGSINFO = 0x69 - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_SPECULATIVE_READ = 0x65 - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_6LOWPAN = 0x40 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - 
IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_3542DSTOPTS = 0x32 - IPV6_3542HOPLIMIT = 0x2f - IPV6_3542HOPOPTS = 0x31 - IPV6_3542NEXTHOP = 0x30 - IPV6_3542PKTINFO = 0x2e - IPV6_3542RTHDR = 0x33 - IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 - IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 - IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - 
IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x3000 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x3d - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x39 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x1c - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCAL_PEERCRED = 0x1 - LOCAL_PEEREPID = 0x3 - LOCAL_PEEREUUID = 0x5 - LOCAL_PEERPID = 0x2 - LOCAL_PEERTOKEN = 0x6 - LOCAL_PEERUUID = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - 
MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_32BIT = 0x8000 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 - MAP_UNIX03 = 0x40000 - MCAST_BLOCK_SOURCE = 0x54 - MCAST_EXCLUDE = 0x2 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x50 - MCAST_JOIN_SOURCE_GROUP = 0x52 - MCAST_LEAVE_GROUP = 0x51 - MCAST_LEAVE_SOURCE_GROUP = 0x53 - MCAST_UNBLOCK_SOURCE = 0x55 - MCAST_UNDEFINED = 0x0 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_EXT_ROOT_DATA_VOL = 0x1 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_REMOVABLE = 0x200 - MNT_ROOTFS = 0x4000 - MNT_SNAPSHOT = 0x40000000 - MNT_STRICTATIME = 0x80000000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0xd7f0f7ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_NOSIGNAL = 0x80000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_FLAGS_PRIV = 0xa - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xb - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACHTIME = 0x100 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 
0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NOFOLLOW_ANY = 0x20000000 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DEAD = 0x20000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 
0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIF6LOWPAN = 0xc02069c5 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFFUNCTIONALTYPE = 0xc02069ad - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGIFXMEDIA = 0xc02c6948 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIF6LOWPAN = 0x802069c4 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_LOCAL = 0x0 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR 
= 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 
0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + AF_VSOCK = 0x28 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x51c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_SPACEUSED = 0x800000 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf087ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + 
BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae 
+ DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x10a + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + 
F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x400473d1 + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + 
IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x3000 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + 
IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x1c + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + 
MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xd7f0f7ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xb + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NFDBITS = 0x20 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 + O_NONBLOCK = 0x4 + O_POPUP = 
0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 
0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED = 0x1 + SO_TRACKER_ATTRIBUTE_FLAGS_DOMAIN_SHORT = 0x4 + SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER = 0x2 + SO_TRACKER_TRANSPARENCY_VERSION = 0x3 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_CC = 0xb + TCPOPT_CCECHO = 0xd + TCPOPT_CCNEW = 0xc + TCPOPT_EOL = 0x0 + TCPOPT_FASTOPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 
0x101080a + TCPOPT_WINDOW = 0x3 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e644eaf5e..e36f5178d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -12,1550 +12,1582 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - 
AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - AF_VSOCK = 0x28 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x51c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 
0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 
- DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x10a - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_DARWIN = 0x10a - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x11 - EVFILT_THREADMARKER = 0x11 - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - FSOPT_RETURN_REALDEV = 0x200 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_INFO = 0x67 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDFILESUPPL = 0x68 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - 
F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPATH_NOFIRMLINK = 0x66 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GETSIGSINFO = 0x69 - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_SPECULATIVE_READ = 0x65 - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_6LOWPAN = 0x40 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 
- IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_3542DSTOPTS = 0x32 - IPV6_3542HOPLIMIT = 0x2f - IPV6_3542HOPOPTS = 0x31 - IPV6_3542NEXTHOP = 0x30 - IPV6_3542PKTINFO = 0x2e - IPV6_3542RTHDR = 0x33 - IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 - IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 - IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x3000 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - 
IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x3d - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x39 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x1c - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCAL_PEERCRED = 0x1 - LOCAL_PEEREPID = 0x3 - LOCAL_PEEREUUID = 0x5 - LOCAL_PEERPID = 0x2 - LOCAL_PEERTOKEN = 0x6 - LOCAL_PEERUUID = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_32BIT = 0x8000 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - 
MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 - MAP_UNIX03 = 0x40000 - MCAST_BLOCK_SOURCE = 0x54 - MCAST_EXCLUDE = 0x2 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x50 - MCAST_JOIN_SOURCE_GROUP = 0x52 - MCAST_LEAVE_GROUP = 0x51 - MCAST_LEAVE_SOURCE_GROUP = 0x53 - MCAST_UNBLOCK_SOURCE = 0x55 - MCAST_UNDEFINED = 0x0 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_EXT_ROOT_DATA_VOL = 0x1 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_REMOVABLE = 0x200 - MNT_ROOTFS = 0x4000 - MNT_SNAPSHOT = 0x40000000 - MNT_STRICTATIME = 0x80000000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0xd7f0f7ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_NOSIGNAL = 0x80000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_FLAGS_PRIV = 0xa - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xb - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACHTIME = 0x100 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED 
= 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NOFOLLOW_ANY = 0x20000000 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DEAD = 0x20000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIF6LOWPAN = 0xc02069c5 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - 
SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFFUNCTIONALTYPE = 0xc02069ad - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGIFXMEDIA = 0xc02c6948 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIF6LOWPAN = 0x802069c4 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_LOCAL = 0x0 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - 
TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + AF_VSOCK = 0x28 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + 
ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x51c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_SPACEUSED = 0x800000 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf087ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 
0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x10a + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + 
DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 
0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x400473d1 + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + 
IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x3000 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION 
= 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x1c + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 
0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xd7f0f7ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xb + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NFDBITS = 0x20 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE 
= 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + 
SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED = 0x1 + SO_TRACKER_ATTRIBUTE_FLAGS_DOMAIN_SHORT = 0x4 + SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER = 0x2 + SO_TRACKER_TRANSPARENCY_VERSION = 0x3 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_CC = 0xb + TCPOPT_CCECHO = 0xd + TCPOPT_CCNEW = 0xc + TCPOPT_EOL = 0x0 + TCPOPT_FASTOPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + 
TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 9c7c5e165..440900112 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1297,6 +1297,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b265abb25..64520d312 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go 
b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 3df99f285..99e9a0e06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1276,6 +1276,11 @@ const ( SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 218d39906..4c8377114 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c3fa22486..dfa9bd938 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. //go:build linux // +build linux @@ -38,7 +38,8 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2d + AF_MAX = 0x2e + AF_MCTP = 0x2d AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -116,6 +117,7 @@ const ( ARPHRD_LAPB = 0x204 ARPHRD_LOCALTLK = 0x305 ARPHRD_LOOPBACK = 0x304 + ARPHRD_MCTP = 0x122 ARPHRD_METRICOM = 0x17 ARPHRD_NETLINK = 0x338 ARPHRD_NETROM = 0x0 @@ -182,6 +184,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -189,6 +192,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -228,7 +233,11 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_BTF_ID = 0x3 BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_FUNC = 0x4 + BPF_PSEUDO_KFUNC_CALL = 0x2 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_IDX = 0x5 + BPF_PSEUDO_MAP_IDX_VALUE = 0x6 BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 @@ -254,6 +263,17 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_CTRLMODE_3_SAMPLES = 0x4 + CAN_CTRLMODE_BERR_REPORTING = 0x10 + CAN_CTRLMODE_CC_LEN8_DLC = 0x100 + CAN_CTRLMODE_FD = 0x20 + CAN_CTRLMODE_FD_NON_ISO = 0x80 + CAN_CTRLMODE_LISTENONLY = 0x2 + CAN_CTRLMODE_LOOPBACK = 0x1 + CAN_CTRLMODE_ONE_SHOT = 0x8 + CAN_CTRLMODE_PRESUME_ACK = 0x40 + CAN_CTRLMODE_TDC_AUTO = 0x200 + CAN_CTRLMODE_TDC_MANUAL = 0x400 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff @@ -331,6 +351,7 @@ const ( CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff + CAN_TERMINATION_DISABLED = 0x0 CAN_TP16 = 0x3 CAN_TP20 = 0x4 CAP_AUDIT_CONTROL = 0x1e @@ -375,9 +396,11 @@ const ( CAP_SYS_TIME = 0x19 CAP_SYS_TTY_CONFIG = 0x1a CAP_WAKE_ALARM = 0x23 + CEPH_SUPER_MAGIC = 0xc36400 CFLUSH = 0xf CGROUP2_SUPER_MAGIC = 0x63677270 CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_SUPER_MAGIC = 0xff534d42 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -468,6 +491,7 @@ const ( DM_DEV_WAIT = 0xc138fd08 DM_DIR = "mapper" DM_GET_TARGET_VERSION = 0xc138fd11 + DM_IMA_MEASUREMENT_FLAG = 0x80000 DM_INACTIVE_PRESENT_FLAG = 0x40 DM_INTERNAL_SUSPEND_FLAG = 
0x40000 DM_IOCTL = 0xfd @@ -475,6 +499,8 @@ const ( DM_LIST_VERSIONS = 0xc138fd0d DM_MAX_TYPE_NAME = 0x10 DM_NAME_LEN = 0x80 + DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID = 0x2 + DM_NAME_LIST_FLAG_HAS_UUID = 0x1 DM_NOFLUSH_FLAG = 0x800 DM_PERSISTENT_DEV_FLAG = 0x8 DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000 @@ -494,9 +520,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-02-01)" + DM_VERSION_EXTRA = "-ioctl (2022-02-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2c + DM_VERSION_MINOR = 0x2e DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -689,6 +715,7 @@ const ( ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -710,6 +737,7 @@ const ( ETH_P_LOOPBACK = 0x9000 ETH_P_MACSEC = 0x88e5 ETH_P_MAP = 0xf9 + ETH_P_MCTP = 0xfa ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 @@ -725,6 +753,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -732,6 +761,7 @@ const ( ETH_P_QINQ2 = 0x9200 ETH_P_QINQ3 = 0x9300 ETH_P_RARP = 0x8035 + ETH_P_REALTEK = 0x8899 ETH_P_SCA = 0x6007 ETH_P_SLOW = 0x8809 ETH_P_SNAP = 0x5 @@ -745,7 +775,23 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EV_ABS = 0x3 + EV_CNT = 0x20 + EV_FF = 0x15 + EV_FF_STATUS = 0x17 + EV_KEY = 0x1 + EV_LED = 0x11 + EV_MAX = 0x1f + EV_MSC = 0x4 + EV_PWR = 0x16 + EV_REL = 0x2 + EV_REP = 0x14 + EV_SND = 0x12 + EV_SW = 0x5 + EV_SYN = 0x0 + EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 + EXFAT_SUPER_MAGIC = 0x2011bab0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 EXT4_SUPER_MAGIC = 0xef53 @@ -783,13 +829,20 @@ const ( FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EPIDFD = -0x2 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 + FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc + FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa + FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 + FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 FAN_MARK_IGNORED_MASK = 0x20 @@ -805,22 +858,34 @@ const ( FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 + FAN_NOPIDFD = -0x1 FAN_ONDIR = 0x40000000 FAN_OPEN = 0x20 FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 + FAN_REPORT_PIDFD = 0x80 + FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIB_RULE_DEV_DETACHED = 0x8 + FIB_RULE_FIND_SADDR = 0x10000 + FIB_RULE_IIF_DETACHED = 0x8 + FIB_RULE_INVERT = 0x2 + FIB_RULE_OIF_DETACHED = 0x10 + FIB_RULE_PERMANENT = 0x1 + FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" @@ -883,6 +948,7 @@ const ( FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 + FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -981,12 +1047,6 @@ const ( 
HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 - ICMPV6_FILTER = 0x1 - ICMPV6_FILTER_BLOCK = 0x1 - ICMPV6_FILTER_BLOCKOTHERS = 0x3 - ICMPV6_FILTER_PASS = 0x2 - ICMPV6_FILTER_PASSONLY = 0x4 - ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ -1001,7 +1061,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1242,21 +1302,28 @@ const ( IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUTF8 = 0x4000 IXANY = 0x800 JFFS2_SUPER_MAGIC = 0x72b6 + KCMPROTO_CONNECTED = 0x0 + KCM_RECV_DISABLE = 0x1 KEXEC_ARCH_386 = 0x30000 KEXEC_ARCH_68K = 0x40000 KEXEC_ARCH_AARCH64 = 0xb70000 KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 KEXEC_ARCH_PARISC = 0xf0000 KEXEC_ARCH_PPC = 0x140000 KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_RISCV = 0xf30000 KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 @@ -1332,6 +1399,21 @@ const ( KEY_SPEC_THREAD_KEYRING = -0x1 KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 + LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 + LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 + LANDLOCK_ACCESS_FS_MAKE_FIFO = 0x400 + LANDLOCK_ACCESS_FS_MAKE_REG = 0x100 + LANDLOCK_ACCESS_FS_MAKE_SOCK = 0x200 + LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 + LANDLOCK_ACCESS_FS_READ_DIR = 0x8 + LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 + LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 + LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 + LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1382,6 +1464,8 @@ const ( MADV_NOHUGEPAGE = 0xf MADV_NORMAL = 0x0 MADV_PAGEOUT = 0x15 + MADV_POPULATE_READ = 0x16 + MADV_POPULATE_WRITE = 0x17 MADV_RANDOM = 0x1 MADV_REMOVE = 0x9 MADV_SEQUENTIAL = 0x2 @@ -1435,8 +1519,21 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MOUNT_ATTR_IDMAP = 0x100000 + MOUNT_ATTR_NOATIME = 0x10 + MOUNT_ATTR_NODEV = 0x4 + MOUNT_ATTR_NODIRATIME = 0x80 + MOUNT_ATTR_NOEXEC = 0x8 + MOUNT_ATTR_NOSUID = 0x2 + MOUNT_ATTR_NOSYMFOLLOW = 0x200000 + MOUNT_ATTR_RDONLY = 0x1 + MOUNT_ATTR_RELATIME = 0x0 + MOUNT_ATTR_SIZE_VER0 = 0x20 + MOUNT_ATTR_STRICTATIME = 0x20 + MOUNT_ATTR__ATIME = 0x70 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1636,11 +1733,12 @@ const ( NFNL_MSG_BATCH_END = 0x11 NFNL_NFA_NEST = 0x8000 NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_COUNT = 0xd NFNL_SUBSYS_CTHELPER = 0x9 NFNL_SUBSYS_CTNETLINK = 0x1 NFNL_SUBSYS_CTNETLINK_EXP = 0x2 NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_HOOK = 0xc NFNL_SUBSYS_IPSET = 0x6 NFNL_SUBSYS_NFTABLES = 0xa NFNL_SUBSYS_NFT_COMPAT = 0xb @@ -1667,6 +1765,7 @@ const ( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -1756,20 +1855,30 @@ const ( PERF_ATTR_SIZE_VER4 = 0x68 PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 + PERF_ATTR_SIZE_VER7 = 
0x80 PERF_AUX_FLAG_COLLISION = 0x8 + PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 + PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 PERF_AUX_FLAG_OVERWRITE = 0x2 PERF_AUX_FLAG_PARTIAL = 0x4 + PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_HW_EVENT_MASK = 0xffffffff PERF_MAX_CONTEXTS_PER_STACK = 0x8 PERF_MAX_STACK_DEPTH = 0x7f PERF_MEM_BLK_ADDR = 0x4 PERF_MEM_BLK_DATA = 0x2 PERF_MEM_BLK_NA = 0x1 PERF_MEM_BLK_SHIFT = 0x28 + PERF_MEM_HOPS_0 = 0x1 + PERF_MEM_HOPS_1 = 0x2 + PERF_MEM_HOPS_2 = 0x3 + PERF_MEM_HOPS_3 = 0x4 + PERF_MEM_HOPS_SHIFT = 0x2b PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 @@ -1822,6 +1931,7 @@ const ( PERF_MEM_TLB_OS = 0x40 PERF_MEM_TLB_SHIFT = 0x1a PERF_MEM_TLB_WK = 0x20 + PERF_PMU_TYPE_SHIFT = 0x20 PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER = 0x1 PERF_RECORD_MISC_COMM_EXEC = 0x2000 PERF_RECORD_MISC_CPUMODE_MASK = 0x7 @@ -1921,7 +2031,18 @@ const ( PR_PAC_APGAKEY = 0x10 PR_PAC_APIAKEY = 0x1 PR_PAC_APIBKEY = 0x2 + PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 + PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_SCHED_CORE = 0x3e + PR_SCHED_CORE_CREATE = 0x1 + PR_SCHED_CORE_GET = 0x0 + PR_SCHED_CORE_MAX = 0x4 + PR_SCHED_CORE_SCOPE_PROCESS_GROUP = 0x2 + PR_SCHED_CORE_SCOPE_THREAD = 0x0 + PR_SCHED_CORE_SCOPE_THREAD_GROUP = 0x1 + PR_SCHED_CORE_SHARE_FROM = 0x3 + PR_SCHED_CORE_SHARE_TO = 0x2 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1960,11 +2081,19 @@ const ( PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SET_VMA = 0x53564d41 + PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_L1D_FLUSH = 0x2 PR_SPEC_NOT_AFFECTED = 0x0 PR_SPEC_PRCTL = 0x1 PR_SPEC_STORE_BYPASS = 0x0 @@ -2003,6 +2132,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -2041,6 +2171,10 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + P_ALL = 0x0 + P_PGID = 0x2 + P_PID = 0x1 + P_PIDFD = 0x3 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 @@ -2099,12 +2233,24 @@ const ( RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 RTC_AF = 0x20 + RTC_BSM_DIRECT = 0x1 + RTC_BSM_DISABLED = 0x0 + RTC_BSM_LEVEL = 0x2 + RTC_BSM_STANDBY = 0x3 RTC_FEATURE_ALARM = 0x0 + RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 - RTC_FEATURE_CNT = 0x3 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 + RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 + RTC_FEATURE_CNT = 0x8 + RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 + RTC_FEATURE_UPDATE_INTERRUPT = 0x4 RTC_IRQF = 0x80 RTC_MAX_FREQ = 0x2000 + RTC_PARAM_BACKUP_SWITCH_MODE = 0x2 + RTC_PARAM_CORRECTION = 0x1 + RTC_PARAM_FEATURES = 0x0 RTC_PF = 0x40 RTC_UF = 0x10 RTF_ADDRCLASSMASK = 0xf8000000 @@ -2163,12 +2309,14 @@ const ( RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 + RTM_DELNEXTHOPBUCKET = 0x75 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 
RTM_F_EQUALIZE = 0x400 @@ -2193,6 +2341,7 @@ const ( RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 RTM_GETNEXTHOP = 0x6a + RTM_GETNEXTHOPBUCKET = 0x76 RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -2200,8 +2349,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x73 + RTM_MAX = 0x7b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2215,6 +2365,7 @@ const ( RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 RTM_NEWNEXTHOP = 0x68 + RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 @@ -2224,11 +2375,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x19 - RTM_NR_MSGTYPES = 0x64 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2253,6 +2406,7 @@ const ( RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 @@ -2283,7 +2437,14 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_MAX = 0x4 + SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 @@ -2344,6 +2505,9 @@ const ( SIOCGSTAMPNS = 0x8907 SIOCGSTAMPNS_OLD = 0x8907 SIOCGSTAMP_OLD = 0x8906 + SIOCKCMATTACH = 0x89e0 + SIOCKCMCLONE = 0x89e2 + SIOCKCMUNATTACH = 0x89e1 SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d @@ -2386,14 +2550,21 @@ const ( SMART_STATUS = 0xda SMART_WRITE_LOG_SECTOR = 0xd6 SMART_WRITE_THRESHOLDS = 0xd7 + SMB2_SUPER_MAGIC = 0xfe534d42 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b + SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 + SOCK_RCVBUF_LOCK = 0x2 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 + SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2409,6 +2580,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2450,6 +2623,8 @@ const ( SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 SO_VM_SOCKETS_BUFFER_SIZE = 0x0 SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW = 0x8 + SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD = 0x6 SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 SO_VM_SOCKETS_TRUSTED = 0x5 @@ -2522,7 +2697,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xa + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -2530,6 +2705,14 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2736,6 +2919,13 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 + WGALLOWEDIP_A_MAX = 0x3 + WGDEVICE_A_MAX = 0x8 + WGPEER_A_MAX = 0xa + WG_CMD_MAX = 0x1 + WG_GENL_NAME = "wireguard" + WG_GENL_VERSION = 0x1 + WG_KEY_LEN = 0x20 WIN_ACKMEDIACHANGE = 0xdb 
WIN_CHECKPOWERMODE1 = 0xe5 WIN_CHECKPOWERMODE2 = 0x98 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 09fc559ed..274e2dabd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -5,7 +5,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go package unix @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -249,6 +250,8 @@ const ( RTC_EPOCH_SET = 0x4004700e RTC_IRQP_READ = 0x8004700b RTC_IRQP_SET = 0x4004700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x801c7011 @@ -292,6 +295,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -308,6 +312,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -321,9 +326,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -344,6 +351,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 75730cc22..95b6eeedf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -5,7 +5,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go package unix @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -250,6 +251,8 @@ const ( RTC_EPOCH_SET = 0x4008700e RTC_IRQP_READ = 0x8008700b RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x80207011 @@ -293,6 +296,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -309,6 +313,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -322,9 +327,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -345,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 127cf17ad..918cd130e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -5,7 +5,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -256,6 +257,8 @@ const ( RTC_EPOCH_SET = 0x4004700e RTC_IRQP_READ = 0x8004700b RTC_IRQP_SET = 0x4004700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x801c7011 @@ -299,6 +302,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -315,6 +319,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -328,9 +333,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -351,6 +358,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 957ca1ff1..3907dc5a9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -5,7 +5,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -148,6 +148,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -246,6 +247,8 @@ const ( RTC_EPOCH_SET = 0x4008700e RTC_IRQP_READ = 0x8008700b RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x80207011 @@ -289,6 +292,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -305,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -318,9 +323,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -341,6 +348,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 @@ -505,6 +513,7 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + ZA_MAGIC = 0x54366345 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go new file mode 100644 index 000000000..03d5c105a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -0,0 +1,818 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d + FLUSHO = 0x1000 + FPU_CTX_MAGIC = 0x46505501 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + OTPERASE = 0x400c4d19 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + 
PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + 
SO_NETNS_COOKIE = 0x47 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 
0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = 
syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no 
message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file 
handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 314a2054f..bd794e010 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -5,7 +5,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -249,6 +250,8 @@ const ( RTC_EPOCH_SET = 0x8004700e RTC_IRQP_READ = 0x4004700b RTC_IRQP_SET = 0x8004700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x401c7011 @@ -292,6 +295,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -308,6 +312,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 @@ -321,9 +326,11 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -345,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 457e8de97..6c741b054 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -5,7 +5,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -249,6 +250,8 @@ const ( RTC_EPOCH_SET = 0x8008700e RTC_IRQP_READ = 0x4008700b RTC_IRQP_SET = 0x8008700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x40207011 @@ -292,6 +295,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -308,6 +312,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 @@ -321,9 +326,11 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -345,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 33cd28f6b..807b8cd2a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -5,7 +5,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -249,6 +250,8 @@ const ( RTC_EPOCH_SET = 0x8008700e RTC_IRQP_READ = 0x4008700b RTC_IRQP_SET = 0x8008700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x40207011 @@ -292,6 +295,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -308,6 +312,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 @@ -321,9 +326,11 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -345,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 0e085ba14..a39e4f5c2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -5,7 +5,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -249,6 +250,8 @@ const ( RTC_EPOCH_SET = 0x8004700e RTC_IRQP_READ = 0x4004700b RTC_IRQP_SET = 0x8004700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x401c7011 @@ -292,6 +295,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -308,6 +312,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 @@ -321,9 +326,11 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -345,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1b5928cff..c0fcda86b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -5,7 +5,7 @@ // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -304,6 +305,8 @@ const ( RTC_EPOCH_SET = 0x8004700e RTC_IRQP_READ = 0x4004700b RTC_IRQP_SET = 0x8004700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x401c7011 @@ -347,6 +350,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -363,6 +367,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 @@ -376,9 +381,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -399,6 +406,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f3a41d6ec..f3b72407a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -5,7 +5,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,8 @@ const ( RTC_EPOCH_SET = 0x8008700e RTC_IRQP_READ = 0x4008700b RTC_IRQP_SET = 0x8008700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x40207011 @@ -351,6 +354,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -367,6 +371,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 @@ -380,9 +385,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -403,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 6a5a555d5..72f2a45d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -5,7 +5,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,8 @@ const ( RTC_EPOCH_SET = 0x8008700e RTC_IRQP_READ = 0x4008700b RTC_IRQP_SET = 0x8008700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x40207011 @@ -351,6 +354,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -367,6 +371,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 @@ -380,9 +385,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -403,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index a4da67edb..45b214b4d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -5,7 +5,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -237,6 +238,8 @@ const ( RTC_EPOCH_SET = 0x4008700e RTC_IRQP_READ = 0x8008700b RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x80207011 @@ -280,6 +283,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -296,6 +300,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -309,9 +314,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -332,6 +339,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index a7028e0ef..1897f207b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -5,7 +5,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -312,6 +313,8 @@ const ( RTC_EPOCH_SET = 0x4008700e RTC_IRQP_READ = 0x8008700b RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 RTC_PIE_OFF = 0x7006 RTC_PIE_ON = 0x7005 RTC_PLL_GET = 0x80207011 @@ -355,6 +358,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 @@ -371,6 +375,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 @@ -384,9 +389,11 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -407,6 +414,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ed3b3286c..1fb7a3953 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -5,7 +5,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -150,6 +150,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -303,6 +304,8 @@ const ( RTC_EPOCH_SET = 0x8008700e RTC_IRQP_READ = 0x4008700b RTC_IRQP_SET = 0x8008700c + RTC_PARAM_GET = 0x80187013 + RTC_PARAM_SET = 0x80187014 RTC_PIE_OFF = 0x20007006 RTC_PIE_ON = 0x20007005 RTC_PLL_GET = 0x40207011 @@ -346,6 +349,7 @@ const ( SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 + SO_BUF_LOCK = 0x51 SO_BUSY_POLL = 0x30 SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 @@ -362,6 +366,7 @@ const ( SO_MARK = 0x22 SO_MAX_PACING_RATE = 0x31 SO_MEMINFO = 0x39 + SO_NETNS_COOKIE = 0x50 SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 @@ -375,9 +380,11 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 + SO_RCVMARK = 0x54 SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 + SO_RESERVE_MEM = 0x52 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x24 @@ -398,6 +405,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x42 SO_TIMESTAMPNS_OLD = 0x21 SO_TIMESTAMP_NEW = 0x46 + SO_TXREHASH = 0x53 SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x25 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 593cc0fef..6d56edc05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 
RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index a4e4c2231..aef6c0856 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 91a23cc72..870215d2c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -17,6 +17,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -29,7 +30,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -255,6 +255,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + r0, er := C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.acct(C.uintptr_t(_p0)) @@ -379,16 +389,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - r0, er := C.fsync(C.int(fd)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, er := C.getpgid(C.int(pid)) pgid = int(r0) @@ -975,7 +975,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] @@ -992,7 +992,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 33c2609b8..a89b0bfa5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -135,6 +135,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + _, e1 := callfsync_range(fd, how, start, length) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -283,16 +293,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, e1 := callfsync(fd) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, e1 := callgetpgid(pid) pgid = int(r0) @@ -931,7 +931,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] @@ -946,7 +946,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 8b737fa97..2caa5adf9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -18,6 +18,7 @@ import ( //go:cgo_import_dynamic libc_wait4 wait4 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fsync_range fsync_range "libc.a/shr_64.o" //go:cgo_import_dynamic libc_acct acct "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" @@ -30,7 +31,6 @@ import ( //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fchownat fchownat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fdatasync fdatasync "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_fsync fsync "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgid getpgid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgrp getpgrp "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" @@ -136,6 +136,7 @@ import ( //go:linkname libc_wait4 libc_wait4 //go:linkname libc_ioctl libc_ioctl //go:linkname libc_fcntl libc_fcntl +//go:linkname libc_fsync_range libc_fsync_range //go:linkname libc_acct libc_acct //go:linkname libc_chdir libc_chdir //go:linkname libc_chroot libc_chroot @@ -148,7 +149,6 @@ import ( //go:linkname libc_fchmodat libc_fchmodat //go:linkname libc_fchownat libc_fchownat //go:linkname libc_fdatasync libc_fdatasync -//go:linkname libc_fsync libc_fsync //go:linkname libc_getpgid libc_getpgid //go:linkname libc_getpgrp libc_getpgrp //go:linkname libc_getpid libc_getpid @@ -257,6 +257,7 @@ var ( libc_wait4, libc_ioctl, libc_fcntl, + libc_fsync_range, libc_acct, libc_chdir, libc_chroot, @@ -269,7 +270,6 @@ var ( libc_fchmodat, libc_fchownat, libc_fdatasync, - libc_fsync, libc_getpgid, libc_getpgrp, libc_getpid, @@ -430,6 +430,13 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync_range)), 4, uintptr(fd), uintptr(how), uintptr(start), uintptr(length), 0, 0) + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_acct)), 1, _p0, 0, 0, 0, 0, 0) return @@ -514,13 +521,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 3c260917e..944a714b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -16,6 +16,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -28,7 +29,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -199,6 +199,14 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.acct(C.uintptr_t(_p0))) e1 = syscall.GetErrno() @@ -295,14 +303,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.fsync(C.int(fd))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getpgid(C.int(pid))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index d4efe8d45..467deed76 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -643,17 +643,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 
0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -734,6 +739,65 @@ var libc_sendfile_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall(libc_shmat_trampoline_addr, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmat shmat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := syscall_syscall(libc_shmctl_trampoline_addr, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmctl shmctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_shmdt_trampoline_addr, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmdt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmdt shmdt "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := syscall_syscall(libc_shmget_trampoline_addr, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmget_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmget shmget "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1579,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1639,7 +1727,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1660,7 +1748,7 @@ var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index bc169c2ab..7e308a476 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,11 +228,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -264,6 +264,30 @@ TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) +TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmat(SB) + +GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) + +TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmctl(SB) + +GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) + +TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmdt(SB) + +GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) + +TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmget(SB) + +GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) + TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) @@ -576,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index f2ee2bd33..35938d34f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -643,17 +643,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, 
_, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -734,6 +739,65 @@ var libc_sendfile_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall(libc_shmat_trampoline_addr, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmat shmat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := syscall_syscall(libc_shmctl_trampoline_addr, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmctl shmctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_shmdt_trampoline_addr, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmdt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmdt shmdt "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := syscall_syscall(libc_shmget_trampoline_addr, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmget_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmget shmget "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1579,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1639,7 +1727,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) 
{ var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1660,7 +1748,7 @@ var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 33e19776d..b09e5bb0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,11 +228,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -264,6 +264,30 @@ TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) +TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmat(SB) + +GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) + +TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmctl(SB) + +GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) + +TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmdt(SB) + +GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) + +TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmget(SB) + +GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) + TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) @@ -576,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 3e9bddb7b..e9d9997ee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1420,7 +1420,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1437,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err 
error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index c72a462b9..edd373b1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1420,7 +1420,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1437,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 530d5df90..82e9764b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1420,7 +1420,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1437,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 71e7df9e8..a6479acd1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1420,7 +1420,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1437,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7305cc915..bc4a27531 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -48,6 +48,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -100,6 +110,16 @@ func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -211,6 +231,16 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + _, _, e1 := Syscall6(SYS_WAITID, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) ret = int(r0) @@ -389,6 +419,21 @@ func mount(source string, target string, fstype string, flags uintptr, data *byt // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT_SETATTR, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -783,6 +828,49 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) { + r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs)) + fsfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsopen(fsName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fspick(dirfd int, pathName string, 
flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1160,6 +1248,26 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fromPathName) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(toPathName) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOVE_MOUNT, uintptr(fromDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(toDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1170,6 +1278,22 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func OpenTree(dfd int, fileName string, flags uint) (r int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fileName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN_TREE, uintptr(dfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + r = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) fd = int(r0) @@ -1201,7 +1325,7 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { +func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) @@ -1935,8 +2059,93 @@ func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags u // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func PidfdOpen(pid int, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_PIDFD_OPEN, uintptr(pid), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_PIDFD_GETFD, uintptr(pidfd), uintptr(targetfd), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PidfdSendSignal(pidfd 
int, sig Signal, info *Siginfo, flags int) (err error) { + _, _, e1 := Syscall6(SYS_PIDFD_SEND_SIGNAL, uintptr(pidfd), uintptr(sig), uintptr(unsafe.Pointer(info)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := Syscall(SYS_SHMAT, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := Syscall(SYS_SHMCTL, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := Syscall(SYS_SHMDT, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := Syscall(SYS_SHMGET, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getitimer(which int, currValue *Itimerval) (err error) { + _, _, e1 := Syscall(SYS_GETITIMER, uintptr(which), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) { + _, _, e1 := Syscall(SYS_SETITIMER, uintptr(which), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue))) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index e37096e4d..88af526b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go +// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && 386 @@ -46,37 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -181,17 +150,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -242,7 +200,7 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -259,7 +217,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -569,9 +527,9 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 9919d8486..2a0c4aa6a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go +// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && amd64 @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -191,17 +170,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func inotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -247,6 +215,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { @@ -257,7 +236,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -274,7 +253,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -476,17 +455,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -711,19 +679,13 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != 
nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -732,13 +694,9 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(cmdline) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 076754d48..4882bde3a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -235,27 +214,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -340,17 +298,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) @@ -591,7 +538,7 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -608,7 +555,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -681,17 +628,6 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e893f987f..9f8c24e43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -389,17 +400,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000..523f2ba03 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,527 @@ +// 
go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) 
(err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := 
Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), 
uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 4703cf3c3..d7d6f4244 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -171,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -188,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -365,17 +344,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -544,17 +512,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -706,18 +663,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
pipe() (p1 int, p2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - p1 = int(r0) - p2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) xaddr = uintptr(r0) @@ -749,9 +694,9 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index a134f9a4d..7f1f8e653 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -201,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -218,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -420,17 +399,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) 
{ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -720,9 +688,9 @@ func stat(path string, st *stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index b1fff2d94..f933d0f51 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -201,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -218,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -420,17 +399,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -717,14 +685,3 @@ func stat(path string, st *stat_t) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 
d13d6da01..297d0a998 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -171,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -188,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -365,17 +344,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -544,17 +512,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -706,18 +663,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (p1 int, p2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - p1 = int(r0) - p2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, 
pageOffset uintptr) (xaddr uintptr, err error) { r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) xaddr = uintptr(r0) @@ -749,9 +694,9 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 927cf1a00..2e32e7a44 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go +// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -161,17 +140,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -242,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -259,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -441,17 +409,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -717,27 +674,6 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { @@ -760,3 +696,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index da8ec0396..3c5317046 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64 @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -191,17 +170,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -272,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -289,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -507,17 +475,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -763,27 +720,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) if e1 != 0 { @@ -806,3 +742,14 @@ func kexecFileLoad(kernelFd int, 
initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 083f493bb..a00c6744e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -191,17 +170,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -272,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -289,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -507,17 +475,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -763,27 +720,6 @@ func utimes(path 
string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) if e1 != 0 { @@ -806,3 +742,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 63b393b80..1239cc2de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -369,17 +380,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index bb347407d..e0dabc602 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go +// go run mksyscall.go -tags linux,s390x syscall_linux.go 
syscall_linux_s390x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x @@ -46,27 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -191,17 +170,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -242,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -259,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -553,9 +521,13 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -564,13 +536,9 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(cmdline) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 8edc517e1..368623c0f 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go +// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 @@ -73,16 +73,6 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -180,17 +170,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -241,7 +220,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -258,7 +237,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -476,17 +455,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -721,19 +689,9 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) if e1 != 0 { err = errnoErr(e1) } diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4726ab30a..4af561a48 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1342,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1359,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fe71456db..3b90e9448 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1342,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1359,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0b5b2f014..890f4ccd1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1342,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1359,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index bfca28648..c79f071fc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1342,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1359,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 8f80f4ade..a057fc5d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 3a47aca7b..04db8fa2f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 883a9b45e..69f803006 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index aac7fdc95..c96a50517 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 877618746..016d959bc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd 
int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4e18d5c99..fdf53f8da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -66,6 +66,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -141,6 +142,11 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_port_create port_create "libc.so" +//go:cgo_import_dynamic libc_port_associate port_associate "libc.so" +//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" +//go:cgo_import_dynamic libc_port_get port_get "libc.so" +//go:cgo_import_dynamic libc_port_getn port_getn "libc.so" //go:linkname procpipe libc_pipe //go:linkname procpipe2 libc_pipe2 @@ -197,6 +203,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -222,8 +229,8 @@ import ( //go:linkname procOpenat libc_openat //go:linkname procPathconf libc_pathconf //go:linkname procPause libc_pause -//go:linkname procPread libc_pread -//go:linkname procPwrite libc_pwrite +//go:linkname procpread libc_pread +//go:linkname procpwrite libc_pwrite //go:linkname procread libc_read //go:linkname procReadlink libc_readlink //go:linkname procRename libc_rename @@ -272,6 +279,11 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procport_create libc_port_create +//go:linkname procport_associate libc_port_associate +//go:linkname procport_dissociate libc_port_dissociate +//go:linkname procport_get libc_port_get +//go:linkname procport_getn libc_port_getn var ( procpipe, @@ -329,6 +341,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -354,8 +367,8 @@ var ( procOpenat, procPathconf, procPause, - procPread, - procPwrite, + procpread, + procpwrite, procread, procReadlink, procRename, @@ -403,7 +416,12 @@ var ( proc__xnet_getsockopt, procgetpeername, procsetsockopt, - procrecvfrom syscallFunc + procrecvfrom, + procport_create, + procport_associate, + procport_dissociate, + procport_get, + procport_getn syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1029,6 +1047,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1365,12 +1394,12 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -1380,12 +1409,12 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -1981,3 +2010,58 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_create() (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_dissociate(port int, source int, object uintptr) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fbc59b7fd..62192e1de 100644 
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -439,4 +439,12 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 04d16d771..490aab5d2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -7,358 +7,366 @@ package unix const ( - SYS_READ = 0 - SYS_WRITE = 1 - SYS_OPEN = 2 - SYS_CLOSE = 3 - SYS_STAT = 4 - SYS_FSTAT = 5 - SYS_LSTAT = 6 - SYS_POLL = 7 - SYS_LSEEK = 8 - SYS_MMAP = 9 - SYS_MPROTECT = 10 - SYS_MUNMAP = 11 - SYS_BRK = 12 - SYS_RT_SIGACTION = 13 - SYS_RT_SIGPROCMASK = 14 - SYS_RT_SIGRETURN = 15 - SYS_IOCTL = 16 - SYS_PREAD64 = 17 - SYS_PWRITE64 = 18 - SYS_READV = 19 - SYS_WRITEV = 20 - SYS_ACCESS = 21 - SYS_PIPE = 22 - SYS_SELECT = 23 - SYS_SCHED_YIELD = 24 - SYS_MREMAP = 25 - SYS_MSYNC = 26 - SYS_MINCORE = 27 - SYS_MADVISE = 28 - SYS_SHMGET = 29 - SYS_SHMAT = 30 - SYS_SHMCTL = 31 - SYS_DUP = 32 - SYS_DUP2 = 33 - SYS_PAUSE = 34 - SYS_NANOSLEEP = 35 - SYS_GETITIMER = 36 - SYS_ALARM = 37 - SYS_SETITIMER = 38 - SYS_GETPID = 39 - SYS_SENDFILE = 40 - SYS_SOCKET = 41 - SYS_CONNECT = 42 - SYS_ACCEPT = 43 - SYS_SENDTO = 44 - SYS_RECVFROM = 45 - SYS_SENDMSG = 46 - SYS_RECVMSG = 47 - SYS_SHUTDOWN = 48 - SYS_BIND = 49 - SYS_LISTEN = 50 - SYS_GETSOCKNAME = 51 - SYS_GETPEERNAME = 52 - SYS_SOCKETPAIR = 53 - SYS_SETSOCKOPT = 54 - SYS_GETSOCKOPT = 55 - SYS_CLONE = 56 - SYS_FORK = 57 - SYS_VFORK = 58 - SYS_EXECVE = 59 - SYS_EXIT = 60 - SYS_WAIT4 = 61 - SYS_KILL = 62 - SYS_UNAME = 63 - SYS_SEMGET = 64 - SYS_SEMOP = 65 - SYS_SEMCTL = 66 - SYS_SHMDT = 67 - SYS_MSGGET = 68 - SYS_MSGSND = 69 - SYS_MSGRCV = 70 - SYS_MSGCTL = 71 - SYS_FCNTL = 72 - SYS_FLOCK = 73 - SYS_FSYNC = 74 - SYS_FDATASYNC = 75 - SYS_TRUNCATE = 76 - SYS_FTRUNCATE = 77 - SYS_GETDENTS = 78 - SYS_GETCWD = 79 - SYS_CHDIR = 80 - SYS_FCHDIR = 81 - SYS_RENAME = 82 - SYS_MKDIR = 83 - SYS_RMDIR = 84 - SYS_CREAT = 85 - SYS_LINK = 86 - SYS_UNLINK = 87 - SYS_SYMLINK = 88 - SYS_READLINK = 89 - SYS_CHMOD = 90 - SYS_FCHMOD = 91 - SYS_CHOWN = 92 - SYS_FCHOWN = 93 - SYS_LCHOWN = 94 - SYS_UMASK = 95 - SYS_GETTIMEOFDAY = 96 - SYS_GETRLIMIT = 97 - SYS_GETRUSAGE = 98 - SYS_SYSINFO = 99 - SYS_TIMES = 100 - SYS_PTRACE = 101 - SYS_GETUID = 102 - SYS_SYSLOG = 103 - SYS_GETGID = 104 - SYS_SETUID = 105 - SYS_SETGID = 106 - SYS_GETEUID = 107 - SYS_GETEGID = 108 - SYS_SETPGID = 109 - SYS_GETPPID = 110 - SYS_GETPGRP = 111 - SYS_SETSID = 112 - SYS_SETREUID = 113 - SYS_SETREGID = 114 - SYS_GETGROUPS = 115 - SYS_SETGROUPS = 116 - SYS_SETRESUID = 117 - SYS_GETRESUID = 118 - SYS_SETRESGID = 119 - SYS_GETRESGID = 120 - SYS_GETPGID = 121 - SYS_SETFSUID = 122 - SYS_SETFSGID = 123 - SYS_GETSID = 124 - SYS_CAPGET = 125 - SYS_CAPSET = 126 - SYS_RT_SIGPENDING = 127 - SYS_RT_SIGTIMEDWAIT = 128 - SYS_RT_SIGQUEUEINFO = 129 - SYS_RT_SIGSUSPEND = 130 - SYS_SIGALTSTACK = 131 - SYS_UTIME = 132 - SYS_MKNOD = 133 - SYS_USELIB = 134 - SYS_PERSONALITY = 135 - SYS_USTAT = 136 - SYS_STATFS = 137 - SYS_FSTATFS = 138 - SYS_SYSFS = 139 - SYS_GETPRIORITY = 140 - SYS_SETPRIORITY = 141 - SYS_SCHED_SETPARAM = 142 - SYS_SCHED_GETPARAM = 143 - SYS_SCHED_SETSCHEDULER = 
144 - SYS_SCHED_GETSCHEDULER = 145 - SYS_SCHED_GET_PRIORITY_MAX = 146 - SYS_SCHED_GET_PRIORITY_MIN = 147 - SYS_SCHED_RR_GET_INTERVAL = 148 - SYS_MLOCK = 149 - SYS_MUNLOCK = 150 - SYS_MLOCKALL = 151 - SYS_MUNLOCKALL = 152 - SYS_VHANGUP = 153 - SYS_MODIFY_LDT = 154 - SYS_PIVOT_ROOT = 155 - SYS__SYSCTL = 156 - SYS_PRCTL = 157 - SYS_ARCH_PRCTL = 158 - SYS_ADJTIMEX = 159 - SYS_SETRLIMIT = 160 - SYS_CHROOT = 161 - SYS_SYNC = 162 - SYS_ACCT = 163 - SYS_SETTIMEOFDAY = 164 - SYS_MOUNT = 165 - SYS_UMOUNT2 = 166 - SYS_SWAPON = 167 - SYS_SWAPOFF = 168 - SYS_REBOOT = 169 - SYS_SETHOSTNAME = 170 - SYS_SETDOMAINNAME = 171 - SYS_IOPL = 172 - SYS_IOPERM = 173 - SYS_CREATE_MODULE = 174 - SYS_INIT_MODULE = 175 - SYS_DELETE_MODULE = 176 - SYS_GET_KERNEL_SYMS = 177 - SYS_QUERY_MODULE = 178 - SYS_QUOTACTL = 179 - SYS_NFSSERVCTL = 180 - SYS_GETPMSG = 181 - SYS_PUTPMSG = 182 - SYS_AFS_SYSCALL = 183 - SYS_TUXCALL = 184 - SYS_SECURITY = 185 - SYS_GETTID = 186 - SYS_READAHEAD = 187 - SYS_SETXATTR = 188 - SYS_LSETXATTR = 189 - SYS_FSETXATTR = 190 - SYS_GETXATTR = 191 - SYS_LGETXATTR = 192 - SYS_FGETXATTR = 193 - SYS_LISTXATTR = 194 - SYS_LLISTXATTR = 195 - SYS_FLISTXATTR = 196 - SYS_REMOVEXATTR = 197 - SYS_LREMOVEXATTR = 198 - SYS_FREMOVEXATTR = 199 - SYS_TKILL = 200 - SYS_TIME = 201 - SYS_FUTEX = 202 - SYS_SCHED_SETAFFINITY = 203 - SYS_SCHED_GETAFFINITY = 204 - SYS_SET_THREAD_AREA = 205 - SYS_IO_SETUP = 206 - SYS_IO_DESTROY = 207 - SYS_IO_GETEVENTS = 208 - SYS_IO_SUBMIT = 209 - SYS_IO_CANCEL = 210 - SYS_GET_THREAD_AREA = 211 - SYS_LOOKUP_DCOOKIE = 212 - SYS_EPOLL_CREATE = 213 - SYS_EPOLL_CTL_OLD = 214 - SYS_EPOLL_WAIT_OLD = 215 - SYS_REMAP_FILE_PAGES = 216 - SYS_GETDENTS64 = 217 - SYS_SET_TID_ADDRESS = 218 - SYS_RESTART_SYSCALL = 219 - SYS_SEMTIMEDOP = 220 - SYS_FADVISE64 = 221 - SYS_TIMER_CREATE = 222 - SYS_TIMER_SETTIME = 223 - SYS_TIMER_GETTIME = 224 - SYS_TIMER_GETOVERRUN = 225 - SYS_TIMER_DELETE = 226 - SYS_CLOCK_SETTIME = 227 - SYS_CLOCK_GETTIME = 228 - SYS_CLOCK_GETRES = 229 - SYS_CLOCK_NANOSLEEP = 230 - SYS_EXIT_GROUP = 231 - SYS_EPOLL_WAIT = 232 - SYS_EPOLL_CTL = 233 - SYS_TGKILL = 234 - SYS_UTIMES = 235 - SYS_VSERVER = 236 - SYS_MBIND = 237 - SYS_SET_MEMPOLICY = 238 - SYS_GET_MEMPOLICY = 239 - SYS_MQ_OPEN = 240 - SYS_MQ_UNLINK = 241 - SYS_MQ_TIMEDSEND = 242 - SYS_MQ_TIMEDRECEIVE = 243 - SYS_MQ_NOTIFY = 244 - SYS_MQ_GETSETATTR = 245 - SYS_KEXEC_LOAD = 246 - SYS_WAITID = 247 - SYS_ADD_KEY = 248 - SYS_REQUEST_KEY = 249 - SYS_KEYCTL = 250 - SYS_IOPRIO_SET = 251 - SYS_IOPRIO_GET = 252 - SYS_INOTIFY_INIT = 253 - SYS_INOTIFY_ADD_WATCH = 254 - SYS_INOTIFY_RM_WATCH = 255 - SYS_MIGRATE_PAGES = 256 - SYS_OPENAT = 257 - SYS_MKDIRAT = 258 - SYS_MKNODAT = 259 - SYS_FCHOWNAT = 260 - SYS_FUTIMESAT = 261 - SYS_NEWFSTATAT = 262 - SYS_UNLINKAT = 263 - SYS_RENAMEAT = 264 - SYS_LINKAT = 265 - SYS_SYMLINKAT = 266 - SYS_READLINKAT = 267 - SYS_FCHMODAT = 268 - SYS_FACCESSAT = 269 - SYS_PSELECT6 = 270 - SYS_PPOLL = 271 - SYS_UNSHARE = 272 - SYS_SET_ROBUST_LIST = 273 - SYS_GET_ROBUST_LIST = 274 - SYS_SPLICE = 275 - SYS_TEE = 276 - SYS_SYNC_FILE_RANGE = 277 - SYS_VMSPLICE = 278 - SYS_MOVE_PAGES = 279 - SYS_UTIMENSAT = 280 - SYS_EPOLL_PWAIT = 281 - SYS_SIGNALFD = 282 - SYS_TIMERFD_CREATE = 283 - SYS_EVENTFD = 284 - SYS_FALLOCATE = 285 - SYS_TIMERFD_SETTIME = 286 - SYS_TIMERFD_GETTIME = 287 - SYS_ACCEPT4 = 288 - SYS_SIGNALFD4 = 289 - SYS_EVENTFD2 = 290 - SYS_EPOLL_CREATE1 = 291 - SYS_DUP3 = 292 - SYS_PIPE2 = 293 - SYS_INOTIFY_INIT1 = 294 - SYS_PREADV = 295 - SYS_PWRITEV = 296 - SYS_RT_TGSIGQUEUEINFO = 297 - SYS_PERF_EVENT_OPEN = 298 - 
SYS_RECVMMSG = 299 - SYS_FANOTIFY_INIT = 300 - SYS_FANOTIFY_MARK = 301 - SYS_PRLIMIT64 = 302 - SYS_NAME_TO_HANDLE_AT = 303 - SYS_OPEN_BY_HANDLE_AT = 304 - SYS_CLOCK_ADJTIME = 305 - SYS_SYNCFS = 306 - SYS_SENDMMSG = 307 - SYS_SETNS = 308 - SYS_GETCPU = 309 - SYS_PROCESS_VM_READV = 310 - SYS_PROCESS_VM_WRITEV = 311 - SYS_KCMP = 312 - SYS_FINIT_MODULE = 313 - SYS_SCHED_SETATTR = 314 - SYS_SCHED_GETATTR = 315 - SYS_RENAMEAT2 = 316 - SYS_SECCOMP = 317 - SYS_GETRANDOM = 318 - SYS_MEMFD_CREATE = 319 - SYS_KEXEC_FILE_LOAD = 320 - SYS_BPF = 321 - SYS_EXECVEAT = 322 - SYS_USERFAULTFD = 323 - SYS_MEMBARRIER = 324 - SYS_MLOCK2 = 325 - SYS_COPY_FILE_RANGE = 326 - SYS_PREADV2 = 327 - SYS_PWRITEV2 = 328 - SYS_PKEY_MPROTECT = 329 - SYS_PKEY_ALLOC = 330 - SYS_PKEY_FREE = 331 - SYS_STATX = 332 - SYS_IO_PGETEVENTS = 333 - SYS_RSEQ = 334 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_READ = 0 + SYS_WRITE = 1 + SYS_OPEN = 2 + SYS_CLOSE = 3 + SYS_STAT = 4 + SYS_FSTAT = 5 + SYS_LSTAT = 6 + SYS_POLL = 7 + SYS_LSEEK = 8 + SYS_MMAP = 9 + SYS_MPROTECT = 10 + SYS_MUNMAP = 11 + SYS_BRK = 12 + SYS_RT_SIGACTION = 13 + SYS_RT_SIGPROCMASK = 14 + SYS_RT_SIGRETURN = 15 + SYS_IOCTL = 16 + SYS_PREAD64 = 17 + SYS_PWRITE64 = 18 + SYS_READV = 19 + SYS_WRITEV = 20 + SYS_ACCESS = 21 + SYS_PIPE = 22 + SYS_SELECT = 23 + SYS_SCHED_YIELD = 24 + SYS_MREMAP = 25 + SYS_MSYNC = 26 + SYS_MINCORE = 27 + SYS_MADVISE = 28 + SYS_SHMGET = 29 + SYS_SHMAT = 30 + SYS_SHMCTL = 31 + SYS_DUP = 32 + SYS_DUP2 = 33 + SYS_PAUSE = 34 + SYS_NANOSLEEP = 35 + SYS_GETITIMER = 36 + SYS_ALARM = 37 + SYS_SETITIMER = 38 + SYS_GETPID = 39 + SYS_SENDFILE = 40 + SYS_SOCKET = 41 + SYS_CONNECT = 42 + SYS_ACCEPT = 43 + SYS_SENDTO = 44 + SYS_RECVFROM = 45 + SYS_SENDMSG = 46 + SYS_RECVMSG = 47 + SYS_SHUTDOWN = 48 + SYS_BIND = 49 + SYS_LISTEN = 50 + SYS_GETSOCKNAME = 51 + SYS_GETPEERNAME = 52 + SYS_SOCKETPAIR = 53 + SYS_SETSOCKOPT = 54 + SYS_GETSOCKOPT = 55 + SYS_CLONE = 56 + SYS_FORK = 57 + SYS_VFORK = 58 + SYS_EXECVE = 59 + SYS_EXIT = 60 + SYS_WAIT4 = 61 + SYS_KILL = 62 + SYS_UNAME = 63 + SYS_SEMGET = 64 + SYS_SEMOP = 65 + SYS_SEMCTL = 66 + SYS_SHMDT = 67 + SYS_MSGGET = 68 + SYS_MSGSND = 69 + SYS_MSGRCV = 70 + SYS_MSGCTL = 71 + SYS_FCNTL = 72 + SYS_FLOCK = 73 + SYS_FSYNC = 74 + SYS_FDATASYNC = 75 + SYS_TRUNCATE = 76 + SYS_FTRUNCATE = 77 + SYS_GETDENTS = 78 + SYS_GETCWD = 79 + SYS_CHDIR = 80 + SYS_FCHDIR = 81 + SYS_RENAME = 82 + SYS_MKDIR = 83 + SYS_RMDIR = 84 + SYS_CREAT = 85 + SYS_LINK = 86 + SYS_UNLINK = 87 + SYS_SYMLINK = 88 + SYS_READLINK = 89 + SYS_CHMOD = 90 + SYS_FCHMOD = 91 + SYS_CHOWN = 92 + SYS_FCHOWN = 93 + SYS_LCHOWN = 94 + SYS_UMASK = 95 + SYS_GETTIMEOFDAY = 96 + SYS_GETRLIMIT = 97 + SYS_GETRUSAGE = 98 + SYS_SYSINFO = 99 + SYS_TIMES = 100 + SYS_PTRACE = 101 + SYS_GETUID = 102 + SYS_SYSLOG = 103 + SYS_GETGID = 104 + SYS_SETUID = 105 + SYS_SETGID = 106 + SYS_GETEUID = 107 + SYS_GETEGID = 108 + SYS_SETPGID = 109 + SYS_GETPPID = 110 + SYS_GETPGRP = 111 + SYS_SETSID = 112 + SYS_SETREUID = 113 + SYS_SETREGID = 114 + SYS_GETGROUPS = 115 + SYS_SETGROUPS = 116 + SYS_SETRESUID = 117 + SYS_GETRESUID = 118 + SYS_SETRESGID = 119 + SYS_GETRESGID = 120 + SYS_GETPGID = 
121 + SYS_SETFSUID = 122 + SYS_SETFSGID = 123 + SYS_GETSID = 124 + SYS_CAPGET = 125 + SYS_CAPSET = 126 + SYS_RT_SIGPENDING = 127 + SYS_RT_SIGTIMEDWAIT = 128 + SYS_RT_SIGQUEUEINFO = 129 + SYS_RT_SIGSUSPEND = 130 + SYS_SIGALTSTACK = 131 + SYS_UTIME = 132 + SYS_MKNOD = 133 + SYS_USELIB = 134 + SYS_PERSONALITY = 135 + SYS_USTAT = 136 + SYS_STATFS = 137 + SYS_FSTATFS = 138 + SYS_SYSFS = 139 + SYS_GETPRIORITY = 140 + SYS_SETPRIORITY = 141 + SYS_SCHED_SETPARAM = 142 + SYS_SCHED_GETPARAM = 143 + SYS_SCHED_SETSCHEDULER = 144 + SYS_SCHED_GETSCHEDULER = 145 + SYS_SCHED_GET_PRIORITY_MAX = 146 + SYS_SCHED_GET_PRIORITY_MIN = 147 + SYS_SCHED_RR_GET_INTERVAL = 148 + SYS_MLOCK = 149 + SYS_MUNLOCK = 150 + SYS_MLOCKALL = 151 + SYS_MUNLOCKALL = 152 + SYS_VHANGUP = 153 + SYS_MODIFY_LDT = 154 + SYS_PIVOT_ROOT = 155 + SYS__SYSCTL = 156 + SYS_PRCTL = 157 + SYS_ARCH_PRCTL = 158 + SYS_ADJTIMEX = 159 + SYS_SETRLIMIT = 160 + SYS_CHROOT = 161 + SYS_SYNC = 162 + SYS_ACCT = 163 + SYS_SETTIMEOFDAY = 164 + SYS_MOUNT = 165 + SYS_UMOUNT2 = 166 + SYS_SWAPON = 167 + SYS_SWAPOFF = 168 + SYS_REBOOT = 169 + SYS_SETHOSTNAME = 170 + SYS_SETDOMAINNAME = 171 + SYS_IOPL = 172 + SYS_IOPERM = 173 + SYS_CREATE_MODULE = 174 + SYS_INIT_MODULE = 175 + SYS_DELETE_MODULE = 176 + SYS_GET_KERNEL_SYMS = 177 + SYS_QUERY_MODULE = 178 + SYS_QUOTACTL = 179 + SYS_NFSSERVCTL = 180 + SYS_GETPMSG = 181 + SYS_PUTPMSG = 182 + SYS_AFS_SYSCALL = 183 + SYS_TUXCALL = 184 + SYS_SECURITY = 185 + SYS_GETTID = 186 + SYS_READAHEAD = 187 + SYS_SETXATTR = 188 + SYS_LSETXATTR = 189 + SYS_FSETXATTR = 190 + SYS_GETXATTR = 191 + SYS_LGETXATTR = 192 + SYS_FGETXATTR = 193 + SYS_LISTXATTR = 194 + SYS_LLISTXATTR = 195 + SYS_FLISTXATTR = 196 + SYS_REMOVEXATTR = 197 + SYS_LREMOVEXATTR = 198 + SYS_FREMOVEXATTR = 199 + SYS_TKILL = 200 + SYS_TIME = 201 + SYS_FUTEX = 202 + SYS_SCHED_SETAFFINITY = 203 + SYS_SCHED_GETAFFINITY = 204 + SYS_SET_THREAD_AREA = 205 + SYS_IO_SETUP = 206 + SYS_IO_DESTROY = 207 + SYS_IO_GETEVENTS = 208 + SYS_IO_SUBMIT = 209 + SYS_IO_CANCEL = 210 + SYS_GET_THREAD_AREA = 211 + SYS_LOOKUP_DCOOKIE = 212 + SYS_EPOLL_CREATE = 213 + SYS_EPOLL_CTL_OLD = 214 + SYS_EPOLL_WAIT_OLD = 215 + SYS_REMAP_FILE_PAGES = 216 + SYS_GETDENTS64 = 217 + SYS_SET_TID_ADDRESS = 218 + SYS_RESTART_SYSCALL = 219 + SYS_SEMTIMEDOP = 220 + SYS_FADVISE64 = 221 + SYS_TIMER_CREATE = 222 + SYS_TIMER_SETTIME = 223 + SYS_TIMER_GETTIME = 224 + SYS_TIMER_GETOVERRUN = 225 + SYS_TIMER_DELETE = 226 + SYS_CLOCK_SETTIME = 227 + SYS_CLOCK_GETTIME = 228 + SYS_CLOCK_GETRES = 229 + SYS_CLOCK_NANOSLEEP = 230 + SYS_EXIT_GROUP = 231 + SYS_EPOLL_WAIT = 232 + SYS_EPOLL_CTL = 233 + SYS_TGKILL = 234 + SYS_UTIMES = 235 + SYS_VSERVER = 236 + SYS_MBIND = 237 + SYS_SET_MEMPOLICY = 238 + SYS_GET_MEMPOLICY = 239 + SYS_MQ_OPEN = 240 + SYS_MQ_UNLINK = 241 + SYS_MQ_TIMEDSEND = 242 + SYS_MQ_TIMEDRECEIVE = 243 + SYS_MQ_NOTIFY = 244 + SYS_MQ_GETSETATTR = 245 + SYS_KEXEC_LOAD = 246 + SYS_WAITID = 247 + SYS_ADD_KEY = 248 + SYS_REQUEST_KEY = 249 + SYS_KEYCTL = 250 + SYS_IOPRIO_SET = 251 + SYS_IOPRIO_GET = 252 + SYS_INOTIFY_INIT = 253 + SYS_INOTIFY_ADD_WATCH = 254 + SYS_INOTIFY_RM_WATCH = 255 + SYS_MIGRATE_PAGES = 256 + SYS_OPENAT = 257 + SYS_MKDIRAT = 258 + SYS_MKNODAT = 259 + SYS_FCHOWNAT = 260 + SYS_FUTIMESAT = 261 + SYS_NEWFSTATAT = 262 + SYS_UNLINKAT = 263 + SYS_RENAMEAT = 264 + SYS_LINKAT = 265 + SYS_SYMLINKAT = 266 + SYS_READLINKAT = 267 + SYS_FCHMODAT = 268 + SYS_FACCESSAT = 269 + SYS_PSELECT6 = 270 + SYS_PPOLL = 271 + SYS_UNSHARE = 272 + SYS_SET_ROBUST_LIST = 273 + SYS_GET_ROBUST_LIST = 274 + SYS_SPLICE = 275 + SYS_TEE 
= 276 + SYS_SYNC_FILE_RANGE = 277 + SYS_VMSPLICE = 278 + SYS_MOVE_PAGES = 279 + SYS_UTIMENSAT = 280 + SYS_EPOLL_PWAIT = 281 + SYS_SIGNALFD = 282 + SYS_TIMERFD_CREATE = 283 + SYS_EVENTFD = 284 + SYS_FALLOCATE = 285 + SYS_TIMERFD_SETTIME = 286 + SYS_TIMERFD_GETTIME = 287 + SYS_ACCEPT4 = 288 + SYS_SIGNALFD4 = 289 + SYS_EVENTFD2 = 290 + SYS_EPOLL_CREATE1 = 291 + SYS_DUP3 = 292 + SYS_PIPE2 = 293 + SYS_INOTIFY_INIT1 = 294 + SYS_PREADV = 295 + SYS_PWRITEV = 296 + SYS_RT_TGSIGQUEUEINFO = 297 + SYS_PERF_EVENT_OPEN = 298 + SYS_RECVMMSG = 299 + SYS_FANOTIFY_INIT = 300 + SYS_FANOTIFY_MARK = 301 + SYS_PRLIMIT64 = 302 + SYS_NAME_TO_HANDLE_AT = 303 + SYS_OPEN_BY_HANDLE_AT = 304 + SYS_CLOCK_ADJTIME = 305 + SYS_SYNCFS = 306 + SYS_SENDMMSG = 307 + SYS_SETNS = 308 + SYS_GETCPU = 309 + SYS_PROCESS_VM_READV = 310 + SYS_PROCESS_VM_WRITEV = 311 + SYS_KCMP = 312 + SYS_FINIT_MODULE = 313 + SYS_SCHED_SETATTR = 314 + SYS_SCHED_GETATTR = 315 + SYS_RENAMEAT2 = 316 + SYS_SECCOMP = 317 + SYS_GETRANDOM = 318 + SYS_MEMFD_CREATE = 319 + SYS_KEXEC_FILE_LOAD = 320 + SYS_BPF = 321 + SYS_EXECVEAT = 322 + SYS_USERFAULTFD = 323 + SYS_MEMBARRIER = 324 + SYS_MLOCK2 = 325 + SYS_COPY_FILE_RANGE = 326 + SYS_PREADV2 = 327 + SYS_PWRITEV2 = 328 + SYS_PKEY_MPROTECT = 329 + SYS_PKEY_ALLOC = 330 + SYS_PKEY_FREE = 331 + SYS_STATX = 332 + SYS_IO_PGETEVENTS = 333 + SYS_RSEQ = 334 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3b1c10513..aca17b6fa 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -7,6 +7,7 @@ package unix const ( + SYS_SYSCALL_MASK = 0 SYS_RESTART_SYSCALL = 0 SYS_EXIT = 1 SYS_FORK = 2 @@ -403,4 +404,11 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 3198adcf7..54b4dfa54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -7,303 +7,311 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - 
SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_RENAMEAT = 38 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - 
SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_RENAMEAT = 38 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + 
SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + 
SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go new file mode 100644 index 000000000..44a764c99 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -0,0 +1,311 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 
162 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index c877ec6e6..65a99efc2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -424,4 +424,11 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR = 4442 + SYS_QUOTACTL_FD = 
4443 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 + SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index b5f290372..841c8a668 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -7,351 +7,358 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 - SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - 
SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - 
SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR = 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID 
= 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + 
SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_QUOTACTL_FD = 5443 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 + SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 46077689a..e26a7c765 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -7,351 +7,358 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 
5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 - SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - 
SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR 
= 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 
5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + 
SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_QUOTACTL_FD = 5443 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 + SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 80e6696b3..26447260a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -424,4 +424,11 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR = 4442 + SYS_QUOTACTL_FD = 4443 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 + SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index b9d697ffb..26aefc186 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -431,4 +431,11 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 08edc54d3..8d4cd9d99 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -7,400 +7,407 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 
58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - 
SYS_SET_TID_ADDRESS = 232 - SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - 
SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + 
SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + 
SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 33b33b083..3b405d1f8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -7,400 +7,407 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - 
SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - SYS_SET_TID_ADDRESS = 232 - SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - 
SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE 
= 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + 
SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + 
SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 66c8a8e09..3a9c96b28 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -7,302 +7,310 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - 
SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - 
SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 
+ SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + 
SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index aea5760ce..8ffa66469 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -7,365 +7,372 @@ package unix const ( - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_RESTART_SYSCALL = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SIGNAL = 48 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_LOOKUP_DCOOKIE = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_GETDENTS = 141 - SYS_SELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - 
SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_GETRLIMIT = 191 - SYS_LCHOWN = 198 - SYS_GETUID = 199 - SYS_GETGID = 200 - SYS_GETEUID = 201 - SYS_GETEGID = 202 - SYS_SETREUID = 203 - SYS_SETREGID = 204 - SYS_GETGROUPS = 205 - SYS_SETGROUPS = 206 - SYS_FCHOWN = 207 - SYS_SETRESUID = 208 - SYS_GETRESUID = 209 - SYS_SETRESGID = 210 - SYS_GETRESGID = 211 - SYS_CHOWN = 212 - SYS_SETUID = 213 - SYS_SETGID = 214 - SYS_SETFSUID = 215 - SYS_SETFSGID = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_READAHEAD = 222 - SYS_SETXATTR = 224 - SYS_LSETXATTR = 225 - SYS_FSETXATTR = 226 - SYS_GETXATTR = 227 - SYS_LGETXATTR = 228 - SYS_FGETXATTR = 229 - SYS_LISTXATTR = 230 - SYS_LLISTXATTR = 231 - SYS_FLISTXATTR = 232 - SYS_REMOVEXATTR = 233 - SYS_LREMOVEXATTR = 234 - SYS_FREMOVEXATTR = 235 - SYS_GETTID = 236 - SYS_TKILL = 237 - SYS_FUTEX = 238 - SYS_SCHED_SETAFFINITY = 239 - SYS_SCHED_GETAFFINITY = 240 - SYS_TGKILL = 241 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_EPOLL_CREATE = 249 - SYS_EPOLL_CTL = 250 - SYS_EPOLL_WAIT = 251 - SYS_SET_TID_ADDRESS = 252 - SYS_FADVISE64 = 253 - SYS_TIMER_CREATE = 254 - SYS_TIMER_SETTIME = 255 - SYS_TIMER_GETTIME = 256 - SYS_TIMER_GETOVERRUN = 257 - SYS_TIMER_DELETE = 258 - SYS_CLOCK_SETTIME = 259 - SYS_CLOCK_GETTIME = 260 - SYS_CLOCK_GETRES = 261 - SYS_CLOCK_NANOSLEEP = 262 - SYS_STATFS64 = 265 - SYS_FSTATFS64 = 266 - SYS_REMAP_FILE_PAGES = 267 - SYS_MBIND = 268 - SYS_GET_MEMPOLICY = 269 - SYS_SET_MEMPOLICY = 270 - SYS_MQ_OPEN = 271 - SYS_MQ_UNLINK = 272 - SYS_MQ_TIMEDSEND = 273 - SYS_MQ_TIMEDRECEIVE = 274 - SYS_MQ_NOTIFY = 275 - SYS_MQ_GETSETATTR = 276 - SYS_KEXEC_LOAD = 277 - SYS_ADD_KEY = 278 - SYS_REQUEST_KEY = 279 - SYS_KEYCTL = 280 - SYS_WAITID = 281 - SYS_IOPRIO_SET = 282 - SYS_IOPRIO_GET = 283 - SYS_INOTIFY_INIT = 284 - SYS_INOTIFY_ADD_WATCH = 285 - SYS_INOTIFY_RM_WATCH = 286 - SYS_MIGRATE_PAGES = 287 - SYS_OPENAT = 288 - SYS_MKDIRAT = 289 - SYS_MKNODAT = 290 - SYS_FCHOWNAT = 291 - SYS_FUTIMESAT = 292 - SYS_NEWFSTATAT = 293 - SYS_UNLINKAT = 294 - SYS_RENAMEAT = 295 - SYS_LINKAT = 296 - SYS_SYMLINKAT = 297 - SYS_READLINKAT = 298 - SYS_FCHMODAT = 299 - SYS_FACCESSAT = 300 - SYS_PSELECT6 = 301 - SYS_PPOLL = 302 - SYS_UNSHARE = 303 - SYS_SET_ROBUST_LIST = 304 - SYS_GET_ROBUST_LIST = 305 - SYS_SPLICE = 306 - SYS_SYNC_FILE_RANGE = 307 - SYS_TEE = 308 - SYS_VMSPLICE = 309 - SYS_MOVE_PAGES = 310 - SYS_GETCPU = 311 - SYS_EPOLL_PWAIT = 312 - SYS_UTIMES = 313 - SYS_FALLOCATE = 314 - SYS_UTIMENSAT = 315 - SYS_SIGNALFD = 316 - SYS_TIMERFD = 317 - SYS_EVENTFD = 318 - SYS_TIMERFD_CREATE = 319 - SYS_TIMERFD_SETTIME = 320 - 
SYS_TIMERFD_GETTIME = 321 - SYS_SIGNALFD4 = 322 - SYS_EVENTFD2 = 323 - SYS_INOTIFY_INIT1 = 324 - SYS_PIPE2 = 325 - SYS_DUP3 = 326 - SYS_EPOLL_CREATE1 = 327 - SYS_PREADV = 328 - SYS_PWRITEV = 329 - SYS_RT_TGSIGQUEUEINFO = 330 - SYS_PERF_EVENT_OPEN = 331 - SYS_FANOTIFY_INIT = 332 - SYS_FANOTIFY_MARK = 333 - SYS_PRLIMIT64 = 334 - SYS_NAME_TO_HANDLE_AT = 335 - SYS_OPEN_BY_HANDLE_AT = 336 - SYS_CLOCK_ADJTIME = 337 - SYS_SYNCFS = 338 - SYS_SETNS = 339 - SYS_PROCESS_VM_READV = 340 - SYS_PROCESS_VM_WRITEV = 341 - SYS_S390_RUNTIME_INSTR = 342 - SYS_KCMP = 343 - SYS_FINIT_MODULE = 344 - SYS_SCHED_SETATTR = 345 - SYS_SCHED_GETATTR = 346 - SYS_RENAMEAT2 = 347 - SYS_SECCOMP = 348 - SYS_GETRANDOM = 349 - SYS_MEMFD_CREATE = 350 - SYS_BPF = 351 - SYS_S390_PCI_MMIO_WRITE = 352 - SYS_S390_PCI_MMIO_READ = 353 - SYS_EXECVEAT = 354 - SYS_USERFAULTFD = 355 - SYS_MEMBARRIER = 356 - SYS_RECVMMSG = 357 - SYS_SENDMMSG = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME = 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_MLOCK2 = 374 - SYS_COPY_FILE_RANGE = 375 - SYS_PREADV2 = 376 - SYS_PWRITEV2 = 377 - SYS_S390_GUARDED_STORAGE = 378 - SYS_STATX = 379 - SYS_S390_STHYI = 380 - SYS_KEXEC_FILE_LOAD = 381 - SYS_IO_PGETEVENTS = 382 - SYS_RSEQ = 383 - SYS_PKEY_MPROTECT = 384 - SYS_PKEY_ALLOC = 385 - SYS_PKEY_FREE = 386 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_RESTART_SYSCALL = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SIGNAL = 48 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + 
SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_LOOKUP_DCOOKIE = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_GETDENTS = 141 + SYS_SELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_GETRLIMIT = 191 + SYS_LCHOWN = 198 + SYS_GETUID = 199 + SYS_GETGID = 200 + SYS_GETEUID = 201 + SYS_GETEGID = 202 + SYS_SETREUID = 203 + SYS_SETREGID = 204 + SYS_GETGROUPS = 205 + SYS_SETGROUPS = 206 + SYS_FCHOWN = 207 + SYS_SETRESUID = 208 + SYS_GETRESUID = 209 + SYS_SETRESGID = 210 + SYS_GETRESGID = 211 + SYS_CHOWN = 212 + SYS_SETUID = 213 + SYS_SETGID = 214 + SYS_SETFSUID = 215 + SYS_SETFSGID = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_READAHEAD = 222 + SYS_SETXATTR = 224 + SYS_LSETXATTR = 225 + SYS_FSETXATTR = 226 + SYS_GETXATTR = 227 + SYS_LGETXATTR = 228 + SYS_FGETXATTR = 229 + SYS_LISTXATTR = 230 + SYS_LLISTXATTR = 231 + SYS_FLISTXATTR = 232 + SYS_REMOVEXATTR = 233 + SYS_LREMOVEXATTR = 234 + SYS_FREMOVEXATTR = 235 + SYS_GETTID = 236 + SYS_TKILL = 237 + SYS_FUTEX = 238 + SYS_SCHED_SETAFFINITY = 239 + SYS_SCHED_GETAFFINITY = 240 + SYS_TGKILL = 241 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_EPOLL_CREATE = 249 + SYS_EPOLL_CTL = 250 + SYS_EPOLL_WAIT = 251 + SYS_SET_TID_ADDRESS = 252 + SYS_FADVISE64 = 253 + SYS_TIMER_CREATE = 254 + SYS_TIMER_SETTIME = 255 + SYS_TIMER_GETTIME = 256 + SYS_TIMER_GETOVERRUN = 257 + SYS_TIMER_DELETE = 258 + SYS_CLOCK_SETTIME = 259 + SYS_CLOCK_GETTIME = 260 + SYS_CLOCK_GETRES = 261 + SYS_CLOCK_NANOSLEEP = 262 + SYS_STATFS64 = 265 + SYS_FSTATFS64 = 266 + SYS_REMAP_FILE_PAGES = 267 + SYS_MBIND = 268 + SYS_GET_MEMPOLICY = 269 + SYS_SET_MEMPOLICY = 270 + SYS_MQ_OPEN = 271 + SYS_MQ_UNLINK = 272 + SYS_MQ_TIMEDSEND = 273 + SYS_MQ_TIMEDRECEIVE = 274 + SYS_MQ_NOTIFY = 275 + SYS_MQ_GETSETATTR = 276 + SYS_KEXEC_LOAD = 277 + SYS_ADD_KEY = 278 + SYS_REQUEST_KEY = 279 + SYS_KEYCTL = 280 + SYS_WAITID = 281 + SYS_IOPRIO_SET = 282 + SYS_IOPRIO_GET = 283 + SYS_INOTIFY_INIT = 284 + SYS_INOTIFY_ADD_WATCH = 285 + SYS_INOTIFY_RM_WATCH = 286 + 
SYS_MIGRATE_PAGES = 287 + SYS_OPENAT = 288 + SYS_MKDIRAT = 289 + SYS_MKNODAT = 290 + SYS_FCHOWNAT = 291 + SYS_FUTIMESAT = 292 + SYS_NEWFSTATAT = 293 + SYS_UNLINKAT = 294 + SYS_RENAMEAT = 295 + SYS_LINKAT = 296 + SYS_SYMLINKAT = 297 + SYS_READLINKAT = 298 + SYS_FCHMODAT = 299 + SYS_FACCESSAT = 300 + SYS_PSELECT6 = 301 + SYS_PPOLL = 302 + SYS_UNSHARE = 303 + SYS_SET_ROBUST_LIST = 304 + SYS_GET_ROBUST_LIST = 305 + SYS_SPLICE = 306 + SYS_SYNC_FILE_RANGE = 307 + SYS_TEE = 308 + SYS_VMSPLICE = 309 + SYS_MOVE_PAGES = 310 + SYS_GETCPU = 311 + SYS_EPOLL_PWAIT = 312 + SYS_UTIMES = 313 + SYS_FALLOCATE = 314 + SYS_UTIMENSAT = 315 + SYS_SIGNALFD = 316 + SYS_TIMERFD = 317 + SYS_EVENTFD = 318 + SYS_TIMERFD_CREATE = 319 + SYS_TIMERFD_SETTIME = 320 + SYS_TIMERFD_GETTIME = 321 + SYS_SIGNALFD4 = 322 + SYS_EVENTFD2 = 323 + SYS_INOTIFY_INIT1 = 324 + SYS_PIPE2 = 325 + SYS_DUP3 = 326 + SYS_EPOLL_CREATE1 = 327 + SYS_PREADV = 328 + SYS_PWRITEV = 329 + SYS_RT_TGSIGQUEUEINFO = 330 + SYS_PERF_EVENT_OPEN = 331 + SYS_FANOTIFY_INIT = 332 + SYS_FANOTIFY_MARK = 333 + SYS_PRLIMIT64 = 334 + SYS_NAME_TO_HANDLE_AT = 335 + SYS_OPEN_BY_HANDLE_AT = 336 + SYS_CLOCK_ADJTIME = 337 + SYS_SYNCFS = 338 + SYS_SETNS = 339 + SYS_PROCESS_VM_READV = 340 + SYS_PROCESS_VM_WRITEV = 341 + SYS_S390_RUNTIME_INSTR = 342 + SYS_KCMP = 343 + SYS_FINIT_MODULE = 344 + SYS_SCHED_SETATTR = 345 + SYS_SCHED_GETATTR = 346 + SYS_RENAMEAT2 = 347 + SYS_SECCOMP = 348 + SYS_GETRANDOM = 349 + SYS_MEMFD_CREATE = 350 + SYS_BPF = 351 + SYS_S390_PCI_MMIO_WRITE = 352 + SYS_S390_PCI_MMIO_READ = 353 + SYS_EXECVEAT = 354 + SYS_USERFAULTFD = 355 + SYS_MEMBARRIER = 356 + SYS_RECVMMSG = 357 + SYS_SENDMMSG = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_MLOCK2 = 374 + SYS_COPY_FILE_RANGE = 375 + SYS_PREADV2 = 376 + SYS_PWRITEV2 = 377 + SYS_S390_GUARDED_STORAGE = 378 + SYS_STATX = 379 + SYS_S390_STHYI = 380 + SYS_KEXEC_FILE_LOAD = 381 + SYS_IO_PGETEVENTS = 382 + SYS_RSEQ = 383 + SYS_PKEY_MPROTECT = 384 + SYS_PKEY_ALLOC = 385 + SYS_PKEY_FREE = 386 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 488ca848d..6a39640e7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -7,379 +7,386 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 
- SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECV = 11 - SYS_CHDIR = 12 - SYS_CHOWN = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BRK = 17 - SYS_PERFCTR = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_CAPGET = 21 - SYS_CAPSET = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_VMSPLICE = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_SIGALTSTACK = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_STAT = 38 - SYS_SENDFILE = 39 - SYS_LSTAT = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_UMOUNT2 = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_MEMORY_ORDERING = 52 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_FSTAT = 62 - SYS_FSTAT64 = 63 - SYS_GETPAGESIZE = 64 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_MMAP = 71 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_VHANGUP = 76 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_SETHOSTNAME = 88 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_ACCEPT = 99 - SYS_GETPRIORITY = 100 - SYS_RT_SIGRETURN = 101 - SYS_RT_SIGACTION = 102 - SYS_RT_SIGPROCMASK = 103 - SYS_RT_SIGPENDING = 104 - SYS_RT_SIGTIMEDWAIT = 105 - SYS_RT_SIGQUEUEINFO = 106 - SYS_RT_SIGSUSPEND = 107 - SYS_SETRESUID = 108 - SYS_GETRESUID = 109 - SYS_SETRESGID = 110 - SYS_GETRESGID = 111 - SYS_RECVMSG = 113 - SYS_SENDMSG = 114 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_GETCWD = 119 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_RECVFROM = 125 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_TRUNCATE = 129 - SYS_FTRUNCATE = 130 - SYS_FLOCK = 131 - SYS_LSTAT64 = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_STAT64 = 139 - SYS_SENDFILE64 = 140 - SYS_GETPEERNAME = 141 - SYS_FUTEX = 142 - SYS_GETTID = 143 - SYS_GETRLIMIT = 144 - SYS_SETRLIMIT = 145 - SYS_PIVOT_ROOT = 146 - SYS_PRCTL = 147 - SYS_PCICONFIG_READ = 148 - SYS_PCICONFIG_WRITE = 149 - SYS_GETSOCKNAME = 150 - SYS_INOTIFY_INIT = 151 - SYS_INOTIFY_ADD_WATCH = 152 - SYS_POLL = 153 - SYS_GETDENTS64 = 154 - SYS_INOTIFY_RM_WATCH = 156 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UMOUNT = 159 - SYS_SCHED_SET_AFFINITY = 160 - SYS_SCHED_GET_AFFINITY = 161 - SYS_GETDOMAINNAME = 162 - SYS_SETDOMAINNAME = 163 - SYS_UTRAP_INSTALL = 164 - SYS_QUOTACTL = 165 - SYS_SET_TID_ADDRESS = 166 - SYS_MOUNT = 167 - SYS_USTAT = 168 - SYS_SETXATTR = 169 - SYS_LSETXATTR = 170 - SYS_FSETXATTR = 171 - SYS_GETXATTR = 172 - SYS_LGETXATTR = 173 - SYS_GETDENTS = 174 - SYS_SETSID = 175 - SYS_FCHDIR = 176 - SYS_FGETXATTR = 177 - SYS_LISTXATTR = 178 - SYS_LLISTXATTR = 179 - SYS_FLISTXATTR = 180 - SYS_REMOVEXATTR = 181 - SYS_LREMOVEXATTR = 182 - SYS_SIGPENDING = 183 - SYS_QUERY_MODULE = 184 - SYS_SETPGID = 185 - SYS_FREMOVEXATTR = 186 - SYS_TKILL = 187 - SYS_EXIT_GROUP = 188 - SYS_UNAME = 189 - SYS_INIT_MODULE = 190 - SYS_PERSONALITY = 191 - SYS_REMAP_FILE_PAGES = 192 - SYS_EPOLL_CREATE = 193 - SYS_EPOLL_CTL = 194 - SYS_EPOLL_WAIT = 195 - SYS_IOPRIO_SET = 196 - 
SYS_GETPPID = 197 - SYS_SIGACTION = 198 - SYS_SGETMASK = 199 - SYS_SSETMASK = 200 - SYS_SIGSUSPEND = 201 - SYS_OLDLSTAT = 202 - SYS_USELIB = 203 - SYS_READDIR = 204 - SYS_READAHEAD = 205 - SYS_SOCKETCALL = 206 - SYS_SYSLOG = 207 - SYS_LOOKUP_DCOOKIE = 208 - SYS_FADVISE64 = 209 - SYS_FADVISE64_64 = 210 - SYS_TGKILL = 211 - SYS_WAITPID = 212 - SYS_SWAPOFF = 213 - SYS_SYSINFO = 214 - SYS_IPC = 215 - SYS_SIGRETURN = 216 - SYS_CLONE = 217 - SYS_IOPRIO_GET = 218 - SYS_ADJTIMEX = 219 - SYS_SIGPROCMASK = 220 - SYS_CREATE_MODULE = 221 - SYS_DELETE_MODULE = 222 - SYS_GET_KERNEL_SYMS = 223 - SYS_GETPGID = 224 - SYS_BDFLUSH = 225 - SYS_SYSFS = 226 - SYS_AFS_SYSCALL = 227 - SYS_SETFSUID = 228 - SYS_SETFSGID = 229 - SYS__NEWSELECT = 230 - SYS_SPLICE = 232 - SYS_STIME = 233 - SYS_STATFS64 = 234 - SYS_FSTATFS64 = 235 - SYS__LLSEEK = 236 - SYS_MLOCK = 237 - SYS_MUNLOCK = 238 - SYS_MLOCKALL = 239 - SYS_MUNLOCKALL = 240 - SYS_SCHED_SETPARAM = 241 - SYS_SCHED_GETPARAM = 242 - SYS_SCHED_SETSCHEDULER = 243 - SYS_SCHED_GETSCHEDULER = 244 - SYS_SCHED_YIELD = 245 - SYS_SCHED_GET_PRIORITY_MAX = 246 - SYS_SCHED_GET_PRIORITY_MIN = 247 - SYS_SCHED_RR_GET_INTERVAL = 248 - SYS_NANOSLEEP = 249 - SYS_MREMAP = 250 - SYS__SYSCTL = 251 - SYS_GETSID = 252 - SYS_FDATASYNC = 253 - SYS_NFSSERVCTL = 254 - SYS_SYNC_FILE_RANGE = 255 - SYS_CLOCK_SETTIME = 256 - SYS_CLOCK_GETTIME = 257 - SYS_CLOCK_GETRES = 258 - SYS_CLOCK_NANOSLEEP = 259 - SYS_SCHED_GETAFFINITY = 260 - SYS_SCHED_SETAFFINITY = 261 - SYS_TIMER_SETTIME = 262 - SYS_TIMER_GETTIME = 263 - SYS_TIMER_GETOVERRUN = 264 - SYS_TIMER_DELETE = 265 - SYS_TIMER_CREATE = 266 - SYS_VSERVER = 267 - SYS_IO_SETUP = 268 - SYS_IO_DESTROY = 269 - SYS_IO_SUBMIT = 270 - SYS_IO_CANCEL = 271 - SYS_IO_GETEVENTS = 272 - SYS_MQ_OPEN = 273 - SYS_MQ_UNLINK = 274 - SYS_MQ_TIMEDSEND = 275 - SYS_MQ_TIMEDRECEIVE = 276 - SYS_MQ_NOTIFY = 277 - SYS_MQ_GETSETATTR = 278 - SYS_WAITID = 279 - SYS_TEE = 280 - SYS_ADD_KEY = 281 - SYS_REQUEST_KEY = 282 - SYS_KEYCTL = 283 - SYS_OPENAT = 284 - SYS_MKDIRAT = 285 - SYS_MKNODAT = 286 - SYS_FCHOWNAT = 287 - SYS_FUTIMESAT = 288 - SYS_FSTATAT64 = 289 - SYS_UNLINKAT = 290 - SYS_RENAMEAT = 291 - SYS_LINKAT = 292 - SYS_SYMLINKAT = 293 - SYS_READLINKAT = 294 - SYS_FCHMODAT = 295 - SYS_FACCESSAT = 296 - SYS_PSELECT6 = 297 - SYS_PPOLL = 298 - SYS_UNSHARE = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_GET_ROBUST_LIST = 301 - SYS_MIGRATE_PAGES = 302 - SYS_MBIND = 303 - SYS_GET_MEMPOLICY = 304 - SYS_SET_MEMPOLICY = 305 - SYS_KEXEC_LOAD = 306 - SYS_MOVE_PAGES = 307 - SYS_GETCPU = 308 - SYS_EPOLL_PWAIT = 309 - SYS_UTIMENSAT = 310 - SYS_SIGNALFD = 311 - SYS_TIMERFD_CREATE = 312 - SYS_EVENTFD = 313 - SYS_FALLOCATE = 314 - SYS_TIMERFD_SETTIME = 315 - SYS_TIMERFD_GETTIME = 316 - SYS_SIGNALFD4 = 317 - SYS_EVENTFD2 = 318 - SYS_EPOLL_CREATE1 = 319 - SYS_DUP3 = 320 - SYS_PIPE2 = 321 - SYS_INOTIFY_INIT1 = 322 - SYS_ACCEPT4 = 323 - SYS_PREADV = 324 - SYS_PWRITEV = 325 - SYS_RT_TGSIGQUEUEINFO = 326 - SYS_PERF_EVENT_OPEN = 327 - SYS_RECVMMSG = 328 - SYS_FANOTIFY_INIT = 329 - SYS_FANOTIFY_MARK = 330 - SYS_PRLIMIT64 = 331 - SYS_NAME_TO_HANDLE_AT = 332 - SYS_OPEN_BY_HANDLE_AT = 333 - SYS_CLOCK_ADJTIME = 334 - SYS_SYNCFS = 335 - SYS_SENDMMSG = 336 - SYS_SETNS = 337 - SYS_PROCESS_VM_READV = 338 - SYS_PROCESS_VM_WRITEV = 339 - SYS_KERN_FEATURES = 340 - SYS_KCMP = 341 - SYS_FINIT_MODULE = 342 - SYS_SCHED_SETATTR = 343 - SYS_SCHED_GETATTR = 344 - SYS_RENAMEAT2 = 345 - SYS_SECCOMP = 346 - SYS_GETRANDOM = 347 - SYS_MEMFD_CREATE = 348 - SYS_BPF = 349 - SYS_EXECVEAT = 350 - SYS_MEMBARRIER = 351 - SYS_USERFAULTFD 
= 352 - SYS_BIND = 353 - SYS_LISTEN = 354 - SYS_SETSOCKOPT = 355 - SYS_MLOCK2 = 356 - SYS_COPY_FILE_RANGE = 357 - SYS_PREADV2 = 358 - SYS_PWRITEV2 = 359 - SYS_STATX = 360 - SYS_IO_PGETEVENTS = 361 - SYS_PKEY_MPROTECT = 362 - SYS_PKEY_ALLOC = 363 - SYS_PKEY_FREE = 364 - SYS_RSEQ = 365 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECV = 11 + SYS_CHDIR = 12 + SYS_CHOWN = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BRK = 17 + SYS_PERFCTR = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_CAPGET = 21 + SYS_CAPSET = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_VMSPLICE = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_SIGALTSTACK = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_STAT = 38 + SYS_SENDFILE = 39 + SYS_LSTAT = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_UMOUNT2 = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_MEMORY_ORDERING = 52 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_FSTAT = 62 + SYS_FSTAT64 = 63 + SYS_GETPAGESIZE = 64 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_MMAP = 71 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_VHANGUP = 76 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_SETHOSTNAME = 88 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_ACCEPT = 99 + SYS_GETPRIORITY = 100 + SYS_RT_SIGRETURN = 101 + SYS_RT_SIGACTION = 102 + SYS_RT_SIGPROCMASK = 103 + SYS_RT_SIGPENDING = 104 + SYS_RT_SIGTIMEDWAIT = 105 + SYS_RT_SIGQUEUEINFO = 106 + SYS_RT_SIGSUSPEND = 107 + SYS_SETRESUID = 108 + SYS_GETRESUID = 109 + SYS_SETRESGID = 110 + SYS_GETRESGID = 111 + SYS_RECVMSG = 113 + SYS_SENDMSG = 114 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_GETCWD = 119 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_RECVFROM = 125 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_TRUNCATE = 129 + SYS_FTRUNCATE = 130 + SYS_FLOCK = 131 + SYS_LSTAT64 = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_STAT64 = 139 + SYS_SENDFILE64 = 140 + SYS_GETPEERNAME = 141 + SYS_FUTEX = 142 + SYS_GETTID = 143 + SYS_GETRLIMIT = 144 + SYS_SETRLIMIT = 145 + SYS_PIVOT_ROOT = 146 + SYS_PRCTL = 147 + SYS_PCICONFIG_READ = 148 + SYS_PCICONFIG_WRITE = 149 + SYS_GETSOCKNAME = 150 + SYS_INOTIFY_INIT = 151 + 
SYS_INOTIFY_ADD_WATCH = 152 + SYS_POLL = 153 + SYS_GETDENTS64 = 154 + SYS_INOTIFY_RM_WATCH = 156 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UMOUNT = 159 + SYS_SCHED_SET_AFFINITY = 160 + SYS_SCHED_GET_AFFINITY = 161 + SYS_GETDOMAINNAME = 162 + SYS_SETDOMAINNAME = 163 + SYS_UTRAP_INSTALL = 164 + SYS_QUOTACTL = 165 + SYS_SET_TID_ADDRESS = 166 + SYS_MOUNT = 167 + SYS_USTAT = 168 + SYS_SETXATTR = 169 + SYS_LSETXATTR = 170 + SYS_FSETXATTR = 171 + SYS_GETXATTR = 172 + SYS_LGETXATTR = 173 + SYS_GETDENTS = 174 + SYS_SETSID = 175 + SYS_FCHDIR = 176 + SYS_FGETXATTR = 177 + SYS_LISTXATTR = 178 + SYS_LLISTXATTR = 179 + SYS_FLISTXATTR = 180 + SYS_REMOVEXATTR = 181 + SYS_LREMOVEXATTR = 182 + SYS_SIGPENDING = 183 + SYS_QUERY_MODULE = 184 + SYS_SETPGID = 185 + SYS_FREMOVEXATTR = 186 + SYS_TKILL = 187 + SYS_EXIT_GROUP = 188 + SYS_UNAME = 189 + SYS_INIT_MODULE = 190 + SYS_PERSONALITY = 191 + SYS_REMAP_FILE_PAGES = 192 + SYS_EPOLL_CREATE = 193 + SYS_EPOLL_CTL = 194 + SYS_EPOLL_WAIT = 195 + SYS_IOPRIO_SET = 196 + SYS_GETPPID = 197 + SYS_SIGACTION = 198 + SYS_SGETMASK = 199 + SYS_SSETMASK = 200 + SYS_SIGSUSPEND = 201 + SYS_OLDLSTAT = 202 + SYS_USELIB = 203 + SYS_READDIR = 204 + SYS_READAHEAD = 205 + SYS_SOCKETCALL = 206 + SYS_SYSLOG = 207 + SYS_LOOKUP_DCOOKIE = 208 + SYS_FADVISE64 = 209 + SYS_FADVISE64_64 = 210 + SYS_TGKILL = 211 + SYS_WAITPID = 212 + SYS_SWAPOFF = 213 + SYS_SYSINFO = 214 + SYS_IPC = 215 + SYS_SIGRETURN = 216 + SYS_CLONE = 217 + SYS_IOPRIO_GET = 218 + SYS_ADJTIMEX = 219 + SYS_SIGPROCMASK = 220 + SYS_CREATE_MODULE = 221 + SYS_DELETE_MODULE = 222 + SYS_GET_KERNEL_SYMS = 223 + SYS_GETPGID = 224 + SYS_BDFLUSH = 225 + SYS_SYSFS = 226 + SYS_AFS_SYSCALL = 227 + SYS_SETFSUID = 228 + SYS_SETFSGID = 229 + SYS__NEWSELECT = 230 + SYS_SPLICE = 232 + SYS_STIME = 233 + SYS_STATFS64 = 234 + SYS_FSTATFS64 = 235 + SYS__LLSEEK = 236 + SYS_MLOCK = 237 + SYS_MUNLOCK = 238 + SYS_MLOCKALL = 239 + SYS_MUNLOCKALL = 240 + SYS_SCHED_SETPARAM = 241 + SYS_SCHED_GETPARAM = 242 + SYS_SCHED_SETSCHEDULER = 243 + SYS_SCHED_GETSCHEDULER = 244 + SYS_SCHED_YIELD = 245 + SYS_SCHED_GET_PRIORITY_MAX = 246 + SYS_SCHED_GET_PRIORITY_MIN = 247 + SYS_SCHED_RR_GET_INTERVAL = 248 + SYS_NANOSLEEP = 249 + SYS_MREMAP = 250 + SYS__SYSCTL = 251 + SYS_GETSID = 252 + SYS_FDATASYNC = 253 + SYS_NFSSERVCTL = 254 + SYS_SYNC_FILE_RANGE = 255 + SYS_CLOCK_SETTIME = 256 + SYS_CLOCK_GETTIME = 257 + SYS_CLOCK_GETRES = 258 + SYS_CLOCK_NANOSLEEP = 259 + SYS_SCHED_GETAFFINITY = 260 + SYS_SCHED_SETAFFINITY = 261 + SYS_TIMER_SETTIME = 262 + SYS_TIMER_GETTIME = 263 + SYS_TIMER_GETOVERRUN = 264 + SYS_TIMER_DELETE = 265 + SYS_TIMER_CREATE = 266 + SYS_VSERVER = 267 + SYS_IO_SETUP = 268 + SYS_IO_DESTROY = 269 + SYS_IO_SUBMIT = 270 + SYS_IO_CANCEL = 271 + SYS_IO_GETEVENTS = 272 + SYS_MQ_OPEN = 273 + SYS_MQ_UNLINK = 274 + SYS_MQ_TIMEDSEND = 275 + SYS_MQ_TIMEDRECEIVE = 276 + SYS_MQ_NOTIFY = 277 + SYS_MQ_GETSETATTR = 278 + SYS_WAITID = 279 + SYS_TEE = 280 + SYS_ADD_KEY = 281 + SYS_REQUEST_KEY = 282 + SYS_KEYCTL = 283 + SYS_OPENAT = 284 + SYS_MKDIRAT = 285 + SYS_MKNODAT = 286 + SYS_FCHOWNAT = 287 + SYS_FUTIMESAT = 288 + SYS_FSTATAT64 = 289 + SYS_UNLINKAT = 290 + SYS_RENAMEAT = 291 + SYS_LINKAT = 292 + SYS_SYMLINKAT = 293 + SYS_READLINKAT = 294 + SYS_FCHMODAT = 295 + SYS_FACCESSAT = 296 + SYS_PSELECT6 = 297 + SYS_PPOLL = 298 + SYS_UNSHARE = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_GET_ROBUST_LIST = 301 + SYS_MIGRATE_PAGES = 302 + SYS_MBIND = 303 + SYS_GET_MEMPOLICY = 304 + SYS_SET_MEMPOLICY = 305 + SYS_KEXEC_LOAD = 306 + SYS_MOVE_PAGES = 307 + SYS_GETCPU = 308 + 
SYS_EPOLL_PWAIT = 309 + SYS_UTIMENSAT = 310 + SYS_SIGNALFD = 311 + SYS_TIMERFD_CREATE = 312 + SYS_EVENTFD = 313 + SYS_FALLOCATE = 314 + SYS_TIMERFD_SETTIME = 315 + SYS_TIMERFD_GETTIME = 316 + SYS_SIGNALFD4 = 317 + SYS_EVENTFD2 = 318 + SYS_EPOLL_CREATE1 = 319 + SYS_DUP3 = 320 + SYS_PIPE2 = 321 + SYS_INOTIFY_INIT1 = 322 + SYS_ACCEPT4 = 323 + SYS_PREADV = 324 + SYS_PWRITEV = 325 + SYS_RT_TGSIGQUEUEINFO = 326 + SYS_PERF_EVENT_OPEN = 327 + SYS_RECVMMSG = 328 + SYS_FANOTIFY_INIT = 329 + SYS_FANOTIFY_MARK = 330 + SYS_PRLIMIT64 = 331 + SYS_NAME_TO_HANDLE_AT = 332 + SYS_OPEN_BY_HANDLE_AT = 333 + SYS_CLOCK_ADJTIME = 334 + SYS_SYNCFS = 335 + SYS_SENDMMSG = 336 + SYS_SETNS = 337 + SYS_PROCESS_VM_READV = 338 + SYS_PROCESS_VM_WRITEV = 339 + SYS_KERN_FEATURES = 340 + SYS_KCMP = 341 + SYS_FINIT_MODULE = 342 + SYS_SCHED_SETATTR = 343 + SYS_SCHED_GETATTR = 344 + SYS_RENAMEAT2 = 345 + SYS_SECCOMP = 346 + SYS_GETRANDOM = 347 + SYS_MEMFD_CREATE = 348 + SYS_BPF = 349 + SYS_EXECVEAT = 350 + SYS_MEMBARRIER = 351 + SYS_USERFAULTFD = 352 + SYS_BIND = 353 + SYS_LISTEN = 354 + SYS_SETSOCKOPT = 355 + SYS_MLOCK2 = 356 + SYS_COPY_FILE_RANGE = 357 + SYS_PREADV2 = 358 + SYS_PWRITEV2 = 359 + SYS_STATX = 360 + SYS_IO_PGETEVENTS = 361 + SYS_PKEY_MPROTECT = 362 + SYS_PKEY_ALLOC = 363 + SYS_PKEY_FREE = 364 + SYS_RSEQ = 365 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2673e6c59..e2a64f099 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -209,6 +209,92 @@ type RawSockaddrCtl struct { Sc_reserved [5]uint32 } +type RawSockaddrVM struct { + Len uint8 + Family uint8 + Reserved1 uint16 + Port uint32 + Cid uint32 +} + +type XVSockPCB struct { + Xv_len uint32 + Xv_vsockpp uint64 + Xvp_local_cid uint32 + Xvp_local_port uint32 + Xvp_remote_cid uint32 + Xvp_remote_port uint32 + Xvp_rxcnt uint32 + Xvp_txcnt uint32 + Xvp_peer_rxhiwat uint32 + Xvp_peer_rxcnt uint32 + Xvp_last_pid int32 + Xvp_gencnt uint64 + Xv_socket XSocket + _ [4]byte +} + +type XSocket struct { + Xso_len uint32 + Xso_so uint32 + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + So_pcb uint32 + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSocket64 struct { + Xso_len uint32 + _ [8]byte + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + _ [8]byte + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + 
So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSockbuf struct { + Cc uint32 + Hiwat uint32 + Mbcnt uint32 + Mbmax uint32 + Lowat int32 + Flags int16 + Timeo int16 +} + +type XVSockPgen struct { + Len uint32 + Count uint64 + Gen uint64 + Sogen uint64 +} + type _Socklen uint32 type Xucred struct { @@ -280,25 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( @@ -535,3 +653,143 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]byte + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]byte + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]byte + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type 
Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ uint16 + _ int32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Lpid int32 + Cpid int32 + Nattch uint16 + _ [34]byte +} + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 1465cbcff..34aa77521 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -209,6 +209,92 @@ type RawSockaddrCtl struct { Sc_reserved [5]uint32 } +type RawSockaddrVM struct { + Len uint8 + Family uint8 + Reserved1 uint16 + Port uint32 + Cid uint32 +} + +type XVSockPCB struct { + Xv_len uint32 + Xv_vsockpp uint64 + Xvp_local_cid uint32 + Xvp_local_port uint32 + Xvp_remote_cid uint32 + Xvp_remote_port uint32 + Xvp_rxcnt uint32 + Xvp_txcnt uint32 + Xvp_peer_rxhiwat uint32 + Xvp_peer_rxcnt uint32 + Xvp_last_pid int32 + Xvp_gencnt uint64 + Xv_socket XSocket + _ [4]byte +} + +type XSocket struct { + Xso_len uint32 + Xso_so uint32 + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + So_pcb uint32 + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSocket64 struct { + Xso_len uint32 + _ [8]byte + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + _ [8]byte + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSockbuf struct { + Cc uint32 + Hiwat uint32 + Mbcnt uint32 + Mbmax uint32 + Lowat int32 + Flags int16 + Timeo int16 +} + +type XVSockPgen struct { + Len uint32 + Count uint64 + Gen uint64 + Sogen uint64 +} + type _Socklen uint32 type Xucred struct { @@ -280,25 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + 
SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( @@ -535,3 +653,143 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]byte + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]byte + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]byte + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ uint16 + _ int32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Lpid int32 + Cpid int32 + Nattch uint16 + _ [34]byte +} + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 1f99c024a..4eec078e5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -31,6 +31,8 @@ type Timeval struct { Usec int32 } +type Time_t int32 + type Rusage struct { Utime Timeval Stime Timeval diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index ddf0305a5..7622904a5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -31,6 +31,8 @@ type Timeval struct { Usec int64 } +type Time_t int64 + type Rusage struct { Utime Timeval Stime Timeval diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index dce0a5c80..19223ce8e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,6 +33,8 @@ type Timeval struct { _ 
[4]byte } +type Time_t int32 + type Rusage struct { Utime Timeval Stime Timeval diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index e23244702..8e3e33f67 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -31,6 +31,8 @@ type Timeval struct { Usec int64 } +type Time_t int64 + type Rusage struct { Utime Timeval Stime Timeval diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go index 236f37ef6..4c485261d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go @@ -13,6 +13,8 @@ const ( I_STR = 0x5308 I_POP = 0x5303 I_PUSH = 0x5302 + I_LINK = 0x530c + I_UNLINK = 0x530d I_PLINK = 0x5316 I_PUNLINK = 0x5317 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 72887abe5..e62611e53 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. //go:build linux // +build linux @@ -24,6 +24,11 @@ type ItimerSpec struct { Value Timespec } +type Itimerval struct { + Interval Timeval + Value Timeval +} + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -452,6 +457,11 @@ type CanFilter struct { Mask uint32 } +type TCPRepairOpt struct { + Code uint32 + Val uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -484,6 +494,7 @@ const ( SizeofUcred = 0xc SizeofTCPInfo = 0x68 SizeofCanFilter = 0x8 + SizeofTCPRepairOpt = 0x8 ) const ( @@ -681,6 +692,16 @@ type NdMsg struct { Type uint8 } +const ( + ICMP_FILTER = 0x1 + + ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 +) + const ( SizeofSockFilter = 0x8 ) @@ -727,10 +748,31 @@ const ( AT_STATX_FORCE_SYNC = 0x2000 AT_STATX_DONT_SYNC = 0x4000 + AT_RECURSIVE = 0x8000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 AT_EACCESS = 0x200 + + OPEN_TREE_CLONE = 0x1 + + MOVE_MOUNT_F_SYMLINKS = 0x1 + MOVE_MOUNT_F_AUTOMOUNTS = 0x2 + MOVE_MOUNT_F_EMPTY_PATH = 0x4 + MOVE_MOUNT_T_SYMLINKS = 0x10 + MOVE_MOUNT_T_AUTOMOUNTS = 0x20 + MOVE_MOUNT_T_EMPTY_PATH = 0x40 + MOVE_MOUNT_SET_GROUP = 0x100 + + FSOPEN_CLOEXEC = 0x1 + + FSPICK_CLOEXEC = 0x1 + FSPICK_SYMLINK_NOFOLLOW = 0x2 + FSPICK_NO_AUTOMOUNT = 0x4 + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ) type OpenHow struct { @@ -849,6 +891,7 @@ const ( CTRL_CMD_NEWMCAST_GRP = 0x7 CTRL_CMD_DELMCAST_GRP = 0x8 CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_CMD_GETPOLICY = 0xa CTRL_ATTR_UNSPEC = 0x0 CTRL_ATTR_FAMILY_ID = 0x1 CTRL_ATTR_FAMILY_NAME = 0x2 @@ -857,12 +900,19 @@ const ( CTRL_ATTR_MAXATTR = 0x5 CTRL_ATTR_OPS = 0x6 CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_POLICY = 0x8 + CTRL_ATTR_OP_POLICY = 0x9 + CTRL_ATTR_OP = 0xa CTRL_ATTR_OP_UNSPEC = 0x0 CTRL_ATTR_OP_ID = 0x1 CTRL_ATTR_OP_FLAGS = 0x2 CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 + CTRL_ATTR_POLICY_UNSPEC = 0x0 + CTRL_ATTR_POLICY_DO = 0x1 + CTRL_ATTR_POLICY_DUMP = 0x2 + CTRL_ATTR_POLICY_DUMP_MAX = 0x2 ) const ( @@ -1001,7 +1051,7 @@ const ( PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 PERF_COUNT_SW_BPF_OUTPUT = 0xa - PERF_COUNT_SW_MAX = 0xb + PERF_COUNT_SW_MAX = 0xc PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 
PERF_SAMPLE_TIME = 0x4 @@ -1077,7 +1127,9 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_MAX = 0xd PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1118,7 +1170,8 @@ const ( PERF_RECORD_BPF_EVENT = 0x12 PERF_RECORD_CGROUP = 0x13 PERF_RECORD_TEXT_POKE = 0x14 - PERF_RECORD_MAX = 0x15 + PERF_RECORD_AUX_OUTPUT_HW_ID = 0x15 + PERF_RECORD_MAX = 0x16 PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0x0 PERF_RECORD_KSYMBOL_TYPE_BPF = 0x1 PERF_RECORD_KSYMBOL_TYPE_OOL = 0x2 @@ -1758,7 +1811,8 @@ const ( const ( NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + NF_NETDEV_EGRESS = 0x1 + NF_NETDEV_NUMHOOKS = 0x2 ) const ( @@ -1773,6 +1827,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 @@ -2338,8 +2394,8 @@ const ( SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff + SOF_TIMESTAMPING_LAST = 0x8000 + SOF_TIMESTAMPING_MASK = 0xffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -2915,7 +2971,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x49 + DEVLINK_CMD_MAX = 0x51 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3138,7 +3194,13 @@ const ( DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2 DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3 DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 0xa4 - DEVLINK_ATTR_MAX = 0xa4 + DEVLINK_ATTR_RATE_TYPE = 0xa5 + DEVLINK_ATTR_RATE_TX_SHARE = 0xa6 + DEVLINK_ATTR_RATE_TX_MAX = 0xa7 + DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 + DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 + DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa + DEVLINK_ATTR_MAX = 0xae DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3246,7 +3308,8 @@ const ( LWTUNNEL_ENCAP_BPF = 0x6 LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 - LWTUNNEL_ENCAP_MAX = 0x8 + LWTUNNEL_ENCAP_IOAM6 = 0x9 + LWTUNNEL_ENCAP_MAX = 0x9 MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3434,7 +3497,14 @@ const ( ETHTOOL_MSG_CABLE_TEST_ACT = 0x1a ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 0x1b ETHTOOL_MSG_TUNNEL_INFO_GET = 0x1c - ETHTOOL_MSG_USER_MAX = 0x1c + ETHTOOL_MSG_FEC_GET = 0x1d + ETHTOOL_MSG_FEC_SET = 0x1e + ETHTOOL_MSG_MODULE_EEPROM_GET = 0x1f + ETHTOOL_MSG_STATS_GET = 0x20 + ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21 + ETHTOOL_MSG_MODULE_GET = 0x22 + ETHTOOL_MSG_MODULE_SET = 0x23 + ETHTOOL_MSG_USER_MAX = 0x23 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3465,7 +3535,14 @@ const ( ETHTOOL_MSG_CABLE_TEST_NTF = 0x1b ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 0x1c ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 0x1d - ETHTOOL_MSG_KERNEL_MAX = 0x1d + ETHTOOL_MSG_FEC_GET_REPLY = 0x1e + ETHTOOL_MSG_FEC_NTF = 0x1f + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 0x20 + ETHTOOL_MSG_STATS_GET_REPLY = 0x21 + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22 + ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 + ETHTOOL_MSG_MODULE_NTF = 0x24 + ETHTOOL_MSG_KERNEL_MAX = 0x24 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3563,7 +3640,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0x9 + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + 
ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3599,7 +3680,9 @@ const ( ETHTOOL_A_COALESCE_TX_USECS_HIGH = 0x15 ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 0x16 ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 - ETHTOOL_A_COALESCE_MAX = 0x17 + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x19 ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3713,6 +3796,8 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const SPEED_UNKNOWN = -0x1 + type EthtoolDrvinfo struct { Cmd uint32 Driver [32]byte @@ -3905,3 +3990,1607 @@ const ( NFC_SDP_ATTR_URI = 0x1 NFC_SDP_ATTR_SAP = 0x2 ) + +type LandlockRulesetAttr struct { + Access_fs uint64 +} + +type LandlockPathBeneathAttr struct { + Allowed_access uint64 + Parent_fd int32 +} + +const ( + LANDLOCK_RULE_PATH_BENEATH = 0x1 +) + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 + + ipc_64 = 0x100 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) + +type MountAttr struct { + Attr_set uint64 + Attr_clr uint64 + Propagation uint64 + Userns_fd uint64 +} + +const ( + WG_CMD_GET_DEVICE = 0x0 + WG_CMD_SET_DEVICE = 0x1 + WGDEVICE_F_REPLACE_PEERS = 0x1 + WGDEVICE_A_UNSPEC = 0x0 + WGDEVICE_A_IFINDEX = 0x1 + WGDEVICE_A_IFNAME = 0x2 + WGDEVICE_A_PRIVATE_KEY = 0x3 + WGDEVICE_A_PUBLIC_KEY = 0x4 + WGDEVICE_A_FLAGS = 0x5 + WGDEVICE_A_LISTEN_PORT = 0x6 + WGDEVICE_A_FWMARK = 0x7 + WGDEVICE_A_PEERS = 0x8 + WGPEER_F_REMOVE_ME = 0x1 + WGPEER_F_REPLACE_ALLOWEDIPS = 0x2 + WGPEER_F_UPDATE_ONLY = 0x4 + WGPEER_A_UNSPEC = 0x0 + WGPEER_A_PUBLIC_KEY = 0x1 + WGPEER_A_PRESHARED_KEY = 0x2 + WGPEER_A_FLAGS = 0x3 + WGPEER_A_ENDPOINT = 0x4 + WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL = 0x5 + WGPEER_A_LAST_HANDSHAKE_TIME = 0x6 + WGPEER_A_RX_BYTES = 0x7 + WGPEER_A_TX_BYTES = 0x8 + WGPEER_A_ALLOWEDIPS = 0x9 + WGPEER_A_PROTOCOL_VERSION = 0xa + WGALLOWEDIP_A_UNSPEC = 0x0 + WGALLOWEDIP_A_FAMILY = 0x1 + WGALLOWEDIP_A_IPADDR = 0x2 + WGALLOWEDIP_A_CIDR_MASK = 0x3 +) + +const ( + NL_ATTR_TYPE_INVALID = 0x0 + NL_ATTR_TYPE_FLAG = 0x1 + NL_ATTR_TYPE_U8 = 0x2 + NL_ATTR_TYPE_U16 = 0x3 + NL_ATTR_TYPE_U32 = 0x4 + NL_ATTR_TYPE_U64 = 0x5 + NL_ATTR_TYPE_S8 = 0x6 + NL_ATTR_TYPE_S16 = 0x7 + NL_ATTR_TYPE_S32 = 0x8 + NL_ATTR_TYPE_S64 = 0x9 + NL_ATTR_TYPE_BINARY = 0xa + NL_ATTR_TYPE_STRING = 0xb + NL_ATTR_TYPE_NUL_STRING = 0xc + NL_ATTR_TYPE_NESTED = 0xd + NL_ATTR_TYPE_NESTED_ARRAY = 0xe + NL_ATTR_TYPE_BITFIELD32 = 0xf + + NL_POLICY_TYPE_ATTR_UNSPEC = 0x0 + NL_POLICY_TYPE_ATTR_TYPE = 0x1 + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 0x2 + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 0x3 + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 0x4 + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 0x5 + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 0x6 + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 0x7 + NL_POLICY_TYPE_ATTR_POLICY_IDX = 0x8 + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 0x9 + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 0xa + NL_POLICY_TYPE_ATTR_PAD = 0xb + NL_POLICY_TYPE_ATTR_MASK = 0xc + NL_POLICY_TYPE_ATTR_MAX = 0xc +) + +type CANBitTiming struct { + Bitrate uint32 + Sample_point uint32 + Tq uint32 + Prop_seg uint32 + Phase_seg1 uint32 + Phase_seg2 uint32 + Sjw uint32 + Brp uint32 +} + +type CANBitTimingConst struct { + Name [16]uint8 + Tseg1_min uint32 + Tseg1_max uint32 + Tseg2_min uint32 + Tseg2_max uint32 
+ Sjw_max uint32 + Brp_min uint32 + Brp_max uint32 + Brp_inc uint32 +} + +type CANClock struct { + Freq uint32 +} + +type CANBusErrorCounters struct { + Txerr uint16 + Rxerr uint16 +} + +type CANCtrlMode struct { + Mask uint32 + Flags uint32 +} + +type CANDeviceStats struct { + Bus_error uint32 + Error_warning uint32 + Error_passive uint32 + Bus_off uint32 + Arbitration_lost uint32 + Restarts uint32 +} + +const ( + CAN_STATE_ERROR_ACTIVE = 0x0 + CAN_STATE_ERROR_WARNING = 0x1 + CAN_STATE_ERROR_PASSIVE = 0x2 + CAN_STATE_BUS_OFF = 0x3 + CAN_STATE_STOPPED = 0x4 + CAN_STATE_SLEEPING = 0x5 + CAN_STATE_MAX = 0x6 +) + +const ( + IFLA_CAN_UNSPEC = 0x0 + IFLA_CAN_BITTIMING = 0x1 + IFLA_CAN_BITTIMING_CONST = 0x2 + IFLA_CAN_CLOCK = 0x3 + IFLA_CAN_STATE = 0x4 + IFLA_CAN_CTRLMODE = 0x5 + IFLA_CAN_RESTART_MS = 0x6 + IFLA_CAN_RESTART = 0x7 + IFLA_CAN_BERR_COUNTER = 0x8 + IFLA_CAN_DATA_BITTIMING = 0x9 + IFLA_CAN_DATA_BITTIMING_CONST = 0xa + IFLA_CAN_TERMINATION = 0xb + IFLA_CAN_TERMINATION_CONST = 0xc + IFLA_CAN_BITRATE_CONST = 0xd + IFLA_CAN_DATA_BITRATE_CONST = 0xe + IFLA_CAN_BITRATE_MAX = 0xf +) + +type KCMAttach struct { + Fd int32 + Bpf_fd int32 +} + +type KCMUnattach struct { + Fd int32 +} + +type KCMClone struct { + Fd int32 +} + +const ( + NL80211_AC_BE = 0x2 + NL80211_AC_BK = 0x3 + NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED = 0x0 + NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 + NL80211_AC_VI = 0x1 + NL80211_AC_VO = 0x0 + NL80211_ATTR_4ADDR = 0x53 + NL80211_ATTR_ACK = 0x5c + NL80211_ATTR_ACK_SIGNAL = 0x107 + NL80211_ATTR_ACL_POLICY = 0xa5 + NL80211_ATTR_ADMITTED_TIME = 0xd4 + NL80211_ATTR_AIRTIME_WEIGHT = 0x112 + NL80211_ATTR_AKM_SUITES = 0x4c + NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AUTH_DATA = 0x9c + NL80211_ATTR_AUTH_TYPE = 0x35 + NL80211_ATTR_BANDS = 0xef + NL80211_ATTR_BEACON_HEAD = 0xe + NL80211_ATTR_BEACON_INTERVAL = 0xc + NL80211_ATTR_BEACON_TAIL = 0xf + NL80211_ATTR_BG_SCAN_PERIOD = 0x98 + NL80211_ATTR_BSS_BASIC_RATES = 0x24 + NL80211_ATTR_BSS = 0x2f + NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_HT_OPMODE = 0x6d + NL80211_ATTR_BSSID = 0xf5 + NL80211_ATTR_BSS_SELECT = 0xe3 + NL80211_ATTR_BSS_SHORT_PREAMBLE = 0x1d + NL80211_ATTR_BSS_SHORT_SLOT_TIME = 0x1e + NL80211_ATTR_CENTER_FREQ1 = 0xa0 + NL80211_ATTR_CENTER_FREQ1_OFFSET = 0x123 + NL80211_ATTR_CENTER_FREQ2 = 0xa1 + NL80211_ATTR_CHANNEL_WIDTH = 0x9f + NL80211_ATTR_CH_SWITCH_BLOCK_TX = 0xb8 + NL80211_ATTR_CH_SWITCH_COUNT = 0xb7 + NL80211_ATTR_CIPHER_SUITE_GROUP = 0x4a + NL80211_ATTR_CIPHER_SUITES = 0x39 + NL80211_ATTR_CIPHER_SUITES_PAIRWISE = 0x49 + NL80211_ATTR_CNTDWN_OFFS_BEACON = 0xba + NL80211_ATTR_CNTDWN_OFFS_PRESP = 0xbb + NL80211_ATTR_COALESCE_RULE = 0xb6 + NL80211_ATTR_COALESCE_RULE_CONDITION = 0x2 + NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 + NL80211_ATTR_COALESCE_RULE_MAX = 0x3 + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_CONN_FAILED_REASON = 0x9b + NL80211_ATTR_CONTROL_PORT = 0x44 + NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 + NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT = 0x67 + NL80211_ATTR_CONTROL_PORT_NO_PREAUTH = 0x11e + NL80211_ATTR_CONTROL_PORT_OVER_NL80211 = 0x108 + NL80211_ATTR_COOKIE = 0x58 + NL80211_ATTR_CQM_BEACON_LOSS_EVENT = 0x8 + NL80211_ATTR_CQM = 0x5e + NL80211_ATTR_CQM_MAX = 0x9 + NL80211_ATTR_CQM_PKT_LOSS_EVENT = 0x4 + NL80211_ATTR_CQM_RSSI_HYST = 0x2 + NL80211_ATTR_CQM_RSSI_LEVEL = 0x9 + NL80211_ATTR_CQM_RSSI_THOLD = 0x1 + NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT = 0x3 + NL80211_ATTR_CQM_TXE_INTVL = 0x7 + NL80211_ATTR_CQM_TXE_PKTS = 0x6 + NL80211_ATTR_CQM_TXE_RATE = 0x5 + 
NL80211_ATTR_CRIT_PROT_ID = 0xb3 + NL80211_ATTR_CSA_C_OFF_BEACON = 0xba + NL80211_ATTR_CSA_C_OFF_PRESP = 0xbb + NL80211_ATTR_CSA_C_OFFSETS_TX = 0xcd + NL80211_ATTR_CSA_IES = 0xb9 + NL80211_ATTR_DEVICE_AP_SME = 0x8d + NL80211_ATTR_DFS_CAC_TIME = 0x7 + NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_HE = 0x12d + NL80211_ATTR_DISABLE_HT = 0x93 + NL80211_ATTR_DISABLE_VHT = 0xaf + NL80211_ATTR_DISCONNECTED_BY_AP = 0x47 + NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e + NL80211_ATTR_DTIM_PERIOD = 0xd + NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EXT_CAPA = 0xa9 + NL80211_ATTR_EXT_CAPA_MASK = 0xaa + NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 + NL80211_ATTR_EXTERNAL_AUTH_SUPPORT = 0x105 + NL80211_ATTR_EXT_FEATURES = 0xd9 + NL80211_ATTR_FEATURE_FLAGS = 0x8f + NL80211_ATTR_FILS_CACHE_ID = 0xfd + NL80211_ATTR_FILS_DISCOVERY = 0x126 + NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM = 0xfb + NL80211_ATTR_FILS_ERP_REALM = 0xfa + NL80211_ATTR_FILS_ERP_RRK = 0xfc + NL80211_ATTR_FILS_ERP_USERNAME = 0xf9 + NL80211_ATTR_FILS_KEK = 0xf2 + NL80211_ATTR_FILS_NONCES = 0xf3 + NL80211_ATTR_FRAME = 0x33 + NL80211_ATTR_FRAME_MATCH = 0x5b + NL80211_ATTR_FRAME_TYPE = 0x65 + NL80211_ATTR_FREQ_AFTER = 0x3b + NL80211_ATTR_FREQ_BEFORE = 0x3a + NL80211_ATTR_FREQ_FIXED = 0x3c + NL80211_ATTR_FREQ_RANGE_END = 0x3 + NL80211_ATTR_FREQ_RANGE_MAX_BW = 0x4 + NL80211_ATTR_FREQ_RANGE_START = 0x2 + NL80211_ATTR_FTM_RESPONDER = 0x10e + NL80211_ATTR_FTM_RESPONDER_STATS = 0x10f + NL80211_ATTR_GENERATION = 0x2e + NL80211_ATTR_HANDLE_DFS = 0xbf + NL80211_ATTR_HE_6GHZ_CAPABILITY = 0x125 + NL80211_ATTR_HE_BSS_COLOR = 0x11b + NL80211_ATTR_HE_CAPABILITY = 0x10d + NL80211_ATTR_HE_OBSS_PD = 0x117 + NL80211_ATTR_HIDDEN_SSID = 0x7e + NL80211_ATTR_HT_CAPABILITY = 0x1f + NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_IE_ASSOC_RESP = 0x80 + NL80211_ATTR_IE = 0x2a + NL80211_ATTR_IE_PROBE_RESP = 0x7f + NL80211_ATTR_IE_RIC = 0xb2 + NL80211_ATTR_IFACE_SOCKET_OWNER = 0xcc + NL80211_ATTR_IFINDEX = 0x3 + NL80211_ATTR_IFNAME = 0x4 + NL80211_ATTR_IFTYPE_AKM_SUITES = 0x11c + NL80211_ATTR_IFTYPE = 0x5 + NL80211_ATTR_IFTYPE_EXT_CAPA = 0xe6 + NL80211_ATTR_INACTIVITY_TIMEOUT = 0x96 + NL80211_ATTR_INTERFACE_COMBINATIONS = 0x78 + NL80211_ATTR_KEY_CIPHER = 0x9 + NL80211_ATTR_KEY = 0x50 + NL80211_ATTR_KEY_DATA = 0x7 + NL80211_ATTR_KEY_DEFAULT = 0xb + NL80211_ATTR_KEY_DEFAULT_MGMT = 0x28 + NL80211_ATTR_KEY_DEFAULT_TYPES = 0x6e + NL80211_ATTR_KEY_IDX = 0x8 + NL80211_ATTR_KEYS = 0x51 + NL80211_ATTR_KEY_SEQ = 0xa + NL80211_ATTR_KEY_TYPE = 0x37 + NL80211_ATTR_LOCAL_MESH_POWER_MODE = 0xa4 + NL80211_ATTR_LOCAL_STATE_CHANGE = 0x5f + NL80211_ATTR_MAC_ACL_MAX = 0xa7 + NL80211_ATTR_MAC_ADDRS = 0xa6 + NL80211_ATTR_MAC = 0x6 + NL80211_ATTR_MAC_HINT = 0xc8 + NL80211_ATTR_MAC_MASK = 0xd7 + NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca + NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 + NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 + NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS = 0x7b + NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION = 0x6f + NL80211_ATTR_MAX_SCAN_IE_LEN = 0x38 + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 + NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MCAST_RATE = 0x6b + NL80211_ATTR_MDID = 0xb1 + NL80211_ATTR_MEASUREMENT_DURATION = 0xeb + NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY = 0xec + NL80211_ATTR_MESH_CONFIG = 0x23 + NL80211_ATTR_MESH_ID = 0x18 + 
NL80211_ATTR_MESH_PEER_AID = 0xed + NL80211_ATTR_MESH_SETUP = 0x70 + NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MNTR_FLAGS = 0x17 + NL80211_ATTR_MPATH_INFO = 0x1b + NL80211_ATTR_MPATH_NEXT_HOP = 0x1a + NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED = 0xf4 + NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR = 0xe8 + NL80211_ATTR_MU_MIMO_GROUP_DATA = 0xe7 + NL80211_ATTR_NAN_FUNC = 0xf0 + NL80211_ATTR_NAN_MASTER_PREF = 0xee + NL80211_ATTR_NAN_MATCH = 0xf1 + NL80211_ATTR_NETNS_FD = 0xdb + NL80211_ATTR_NOACK_MAP = 0x95 + NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c + NL80211_ATTR_OPER_CLASS = 0xd6 + NL80211_ATTR_OPMODE_NOTIF = 0xc2 + NL80211_ATTR_P2P_CTWINDOW = 0xa2 + NL80211_ATTR_P2P_OPPPS = 0xa3 + NL80211_ATTR_PAD = 0xe5 + NL80211_ATTR_PBSS = 0xe2 + NL80211_ATTR_PEER_AID = 0xb5 + NL80211_ATTR_PEER_MEASUREMENTS = 0x111 + NL80211_ATTR_PID = 0x52 + NL80211_ATTR_PMK = 0xfe + NL80211_ATTR_PMKID = 0x55 + NL80211_ATTR_PMK_LIFETIME = 0x11f + NL80211_ATTR_PMKR0_NAME = 0x102 + NL80211_ATTR_PMK_REAUTH_THRESHOLD = 0x120 + NL80211_ATTR_PMKSA_CANDIDATE = 0x86 + NL80211_ATTR_PORT_AUTHORIZED = 0x103 + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 + NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_PREV_BSSID = 0x4f + NL80211_ATTR_PRIVACY = 0x46 + NL80211_ATTR_PROBE_RESP = 0x91 + NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 + NL80211_ATTR_PROTOCOL_FEATURES = 0xad + NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_EVENT = 0xa8 + NL80211_ATTR_REASON_CODE = 0x36 + NL80211_ATTR_RECEIVE_MULTICAST = 0x121 + NL80211_ATTR_RECONNECT_REQUESTED = 0x12b + NL80211_ATTR_REG_ALPHA2 = 0x21 + NL80211_ATTR_REG_INDOOR = 0xdd + NL80211_ATTR_REG_INITIATOR = 0x30 + NL80211_ATTR_REG_RULE_FLAGS = 0x1 + NL80211_ATTR_REG_RULES = 0x22 + NL80211_ATTR_REG_TYPE = 0x31 + NL80211_ATTR_REKEY_DATA = 0x7a + NL80211_ATTR_REQ_IE = 0x4d + NL80211_ATTR_RESP_IE = 0x4e + NL80211_ATTR_ROAM_SUPPORT = 0x83 + NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RXMGMT_FLAGS = 0xbc + NL80211_ATTR_RX_SIGNAL_DBM = 0x97 + NL80211_ATTR_S1G_CAPABILITY = 0x128 + NL80211_ATTR_S1G_CAPABILITY_MASK = 0x129 + NL80211_ATTR_SAE_DATA = 0x9c + NL80211_ATTR_SAE_PASSWORD = 0x115 + NL80211_ATTR_SAE_PWE = 0x12a + NL80211_ATTR_SAR_SPEC = 0x12c + NL80211_ATTR_SCAN_FLAGS = 0x9e + NL80211_ATTR_SCAN_FREQ_KHZ = 0x124 + NL80211_ATTR_SCAN_FREQUENCIES = 0x2c + NL80211_ATTR_SCAN_GENERATION = 0x2e + NL80211_ATTR_SCAN_SSIDS = 0x2d + NL80211_ATTR_SCAN_START_TIME_TSF_BSSID = 0xea + NL80211_ATTR_SCAN_START_TIME_TSF = 0xe9 + NL80211_ATTR_SCAN_SUPP_RATES = 0x7d + NL80211_ATTR_SCHED_SCAN_DELAY = 0xdc + NL80211_ATTR_SCHED_SCAN_INTERVAL = 0x77 + NL80211_ATTR_SCHED_SCAN_MATCH = 0x84 + NL80211_ATTR_SCHED_SCAN_MATCH_SSID = 0x1 + NL80211_ATTR_SCHED_SCAN_MAX_REQS = 0x100 + NL80211_ATTR_SCHED_SCAN_MULTI = 0xff + NL80211_ATTR_SCHED_SCAN_PLANS = 0xe1 + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI = 0xf6 + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST = 0xf7 + NL80211_ATTR_SMPS_MODE = 0xd5 + NL80211_ATTR_SOCKET_OWNER = 0xcc + NL80211_ATTR_SOFTWARE_IFTYPES = 0x79 + NL80211_ATTR_SPLIT_WIPHY_DUMP = 0xae + NL80211_ATTR_SSID = 0x34 + NL80211_ATTR_STA_AID = 0x10 + NL80211_ATTR_STA_CAPABILITY = 0xab + NL80211_ATTR_STA_EXT_CAPABILITY = 0xac + NL80211_ATTR_STA_FLAGS2 = 0x43 + NL80211_ATTR_STA_FLAGS = 0x11 + NL80211_ATTR_STA_INFO = 0x15 + NL80211_ATTR_STA_LISTEN_INTERVAL = 0x12 + NL80211_ATTR_STA_PLINK_ACTION = 0x19 + NL80211_ATTR_STA_PLINK_STATE = 0x74 + NL80211_ATTR_STA_SUPPORTED_CHANNELS = 0xbd + NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES = 0xbe + NL80211_ATTR_STA_SUPPORTED_RATES = 0x13 + 
NL80211_ATTR_STA_SUPPORT_P2P_PS = 0xe4 + NL80211_ATTR_STATUS_CODE = 0x48 + NL80211_ATTR_STA_TX_POWER = 0x114 + NL80211_ATTR_STA_TX_POWER_SETTING = 0x113 + NL80211_ATTR_STA_VLAN = 0x14 + NL80211_ATTR_STA_WME = 0x81 + NL80211_ATTR_SUPPORT_10_MHZ = 0xc1 + NL80211_ATTR_SUPPORT_5_MHZ = 0xc0 + NL80211_ATTR_SUPPORT_AP_UAPSD = 0x82 + NL80211_ATTR_SUPPORTED_COMMANDS = 0x32 + NL80211_ATTR_SUPPORTED_IFTYPES = 0x20 + NL80211_ATTR_SUPPORT_IBSS_RSN = 0x68 + NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 + NL80211_ATTR_SURVEY_INFO = 0x54 + NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TDLS_ACTION = 0x88 + NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 + NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c + NL80211_ATTR_TDLS_INITIATOR = 0xcf + NL80211_ATTR_TDLS_OPERATION = 0x8a + NL80211_ATTR_TDLS_PEER_CAPABILITY = 0xcb + NL80211_ATTR_TDLS_SUPPORT = 0x8b + NL80211_ATTR_TESTDATA = 0x45 + NL80211_ATTR_TID_CONFIG = 0x11d + NL80211_ATTR_TIMED_OUT = 0x41 + NL80211_ATTR_TIMEOUT = 0x110 + NL80211_ATTR_TIMEOUT_REASON = 0xf8 + NL80211_ATTR_TSID = 0xd2 + NL80211_ATTR_TWT_RESPONDER = 0x116 + NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_NO_CCK_RATE = 0x87 + NL80211_ATTR_TXQ_LIMIT = 0x10a + NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b + NL80211_ATTR_TXQ_QUANTUM = 0x10c + NL80211_ATTR_TXQ_STATS = 0x109 + NL80211_ATTR_TX_RATES = 0x5a + NL80211_ATTR_UNSOL_BCAST_PROBE_RESP = 0x127 + NL80211_ATTR_UNSPEC = 0x0 + NL80211_ATTR_USE_MFP = 0x42 + NL80211_ATTR_USER_PRIO = 0xd3 + NL80211_ATTR_USER_REG_HINT_TYPE = 0x9a + NL80211_ATTR_USE_RRM = 0xd0 + NL80211_ATTR_VENDOR_DATA = 0xc5 + NL80211_ATTR_VENDOR_EVENTS = 0xc6 + NL80211_ATTR_VENDOR_ID = 0xc3 + NL80211_ATTR_VENDOR_SUBCMD = 0xc4 + NL80211_ATTR_VHT_CAPABILITY = 0x9d + NL80211_ATTR_VHT_CAPABILITY_MASK = 0xb0 + NL80211_ATTR_VLAN_ID = 0x11a + NL80211_ATTR_WANT_1X_4WAY_HS = 0x101 + NL80211_ATTR_WDEV = 0x99 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX = 0x72 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX = 0x71 + NL80211_ATTR_WIPHY_ANTENNA_RX = 0x6a + NL80211_ATTR_WIPHY_ANTENNA_TX = 0x69 + NL80211_ATTR_WIPHY_BANDS = 0x16 + NL80211_ATTR_WIPHY_CHANNEL_TYPE = 0x27 + NL80211_ATTR_WIPHY = 0x1 + NL80211_ATTR_WIPHY_COVERAGE_CLASS = 0x59 + NL80211_ATTR_WIPHY_DYN_ACK = 0xd1 + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG = 0x119 + NL80211_ATTR_WIPHY_EDMG_CHANNELS = 0x118 + NL80211_ATTR_WIPHY_FRAG_THRESHOLD = 0x3f + NL80211_ATTR_WIPHY_FREQ = 0x26 + NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 + NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e + NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d + NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 + NL80211_ATTR_WIPHY_SELF_MANAGED_REG = 0xd8 + NL80211_ATTR_WIPHY_TX_POWER_LEVEL = 0x62 + NL80211_ATTR_WIPHY_TX_POWER_SETTING = 0x61 + NL80211_ATTR_WIPHY_TXQ_PARAMS = 0x25 + NL80211_ATTR_WOWLAN_TRIGGERS = 0x75 + NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED = 0x76 + NL80211_ATTR_WPA_VERSIONS = 0x4b + NL80211_AUTHTYPE_AUTOMATIC = 0x8 + NL80211_AUTHTYPE_FILS_PK = 0x7 + NL80211_AUTHTYPE_FILS_SK = 0x5 + NL80211_AUTHTYPE_FILS_SK_PFS = 0x6 + NL80211_AUTHTYPE_FT = 0x2 + NL80211_AUTHTYPE_MAX = 0x7 + NL80211_AUTHTYPE_NETWORK_EAP = 0x3 + NL80211_AUTHTYPE_OPEN_SYSTEM = 0x0 + NL80211_AUTHTYPE_SAE = 0x4 + NL80211_AUTHTYPE_SHARED_KEY = 0x1 + NL80211_BAND_2GHZ = 0x0 + NL80211_BAND_5GHZ = 0x1 + NL80211_BAND_60GHZ = 0x2 + NL80211_BAND_6GHZ = 0x3 + NL80211_BAND_ATTR_EDMG_BW_CONFIG = 0xb + NL80211_BAND_ATTR_EDMG_CHANNELS = 0xa + NL80211_BAND_ATTR_FREQS = 0x1 + NL80211_BAND_ATTR_HT_AMPDU_DENSITY = 0x6 + NL80211_BAND_ATTR_HT_AMPDU_FACTOR = 0x5 + NL80211_BAND_ATTR_HT_CAPA = 0x4 + 
NL80211_BAND_ATTR_HT_MCS_SET = 0x3 + NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 + NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_VHT_CAPA = 0x8 + NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 + NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_S1GHZ = 0x4 + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 + NL80211_BITRATE_ATTR_MAX = 0x2 + NL80211_BITRATE_ATTR_RATE = 0x1 + NL80211_BSS_BEACON_IES = 0xb + NL80211_BSS_BEACON_INTERVAL = 0x4 + NL80211_BSS_BEACON_TSF = 0xd + NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CAPABILITY = 0x5 + NL80211_BSS_CHAIN_SIGNAL = 0x13 + NL80211_BSS_CHAN_WIDTH_10 = 0x1 + NL80211_BSS_CHAN_WIDTH_1 = 0x3 + NL80211_BSS_CHAN_WIDTH_20 = 0x0 + NL80211_BSS_CHAN_WIDTH_2 = 0x4 + NL80211_BSS_CHAN_WIDTH_5 = 0x2 + NL80211_BSS_CHAN_WIDTH = 0xc + NL80211_BSS_FREQUENCY = 0x2 + NL80211_BSS_FREQUENCY_OFFSET = 0x14 + NL80211_BSS_INFORMATION_ELEMENTS = 0x6 + NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf + NL80211_BSS_MAX = 0x14 + NL80211_BSS_PAD = 0x10 + NL80211_BSS_PARENT_BSSID = 0x12 + NL80211_BSS_PARENT_TSF = 0x11 + NL80211_BSS_PRESP_DATA = 0xe + NL80211_BSS_SEEN_MS_AGO = 0xa + NL80211_BSS_SELECT_ATTR_BAND_PREF = 0x2 + NL80211_BSS_SELECT_ATTR_MAX = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI = 0x1 + NL80211_BSS_SIGNAL_MBM = 0x7 + NL80211_BSS_SIGNAL_UNSPEC = 0x8 + NL80211_BSS_STATUS_ASSOCIATED = 0x1 + NL80211_BSS_STATUS_AUTHENTICATED = 0x0 + NL80211_BSS_STATUS = 0x9 + NL80211_BSS_STATUS_IBSS_JOINED = 0x2 + NL80211_BSS_TSF = 0x3 + NL80211_CHAN_HT20 = 0x1 + NL80211_CHAN_HT40MINUS = 0x2 + NL80211_CHAN_HT40PLUS = 0x3 + NL80211_CHAN_NO_HT = 0x0 + NL80211_CHAN_WIDTH_10 = 0x7 + NL80211_CHAN_WIDTH_160 = 0x5 + NL80211_CHAN_WIDTH_16 = 0xc + NL80211_CHAN_WIDTH_1 = 0x8 + NL80211_CHAN_WIDTH_20 = 0x1 + NL80211_CHAN_WIDTH_20_NOHT = 0x0 + NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_40 = 0x2 + NL80211_CHAN_WIDTH_4 = 0xa + NL80211_CHAN_WIDTH_5 = 0x6 + NL80211_CHAN_WIDTH_80 = 0x3 + NL80211_CHAN_WIDTH_80P80 = 0x4 + NL80211_CHAN_WIDTH_8 = 0xb + NL80211_CMD_ABORT_SCAN = 0x72 + NL80211_CMD_ACTION = 0x3b + NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_NAN_FUNCTION = 0x75 + NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOCIATE = 0x26 + NL80211_CMD_AUTHENTICATE = 0x25 + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 + NL80211_CMD_CHANGE_NAN_CONFIG = 0x77 + NL80211_CMD_CHANNEL_SWITCH = 0x66 + NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_CONNECT = 0x2e + NL80211_CMD_CONN_FAILED = 0x5b + NL80211_CMD_CONTROL_PORT_FRAME = 0x81 + NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS = 0x8b + NL80211_CMD_CRIT_PROTOCOL_START = 0x62 + NL80211_CMD_CRIT_PROTOCOL_STOP = 0x63 + NL80211_CMD_DEAUTHENTICATE = 0x27 + NL80211_CMD_DEL_BEACON = 0x10 + NL80211_CMD_DEL_INTERFACE = 0x8 + NL80211_CMD_DEL_KEY = 0xc + NL80211_CMD_DEL_MPATH = 0x18 + NL80211_CMD_DEL_NAN_FUNCTION = 0x76 + NL80211_CMD_DEL_PMK = 0x7c + NL80211_CMD_DEL_PMKSA = 0x35 + NL80211_CMD_DEL_STATION = 0x14 + NL80211_CMD_DEL_TX_TS = 0x6a + NL80211_CMD_DEL_WIPHY = 0x4 + NL80211_CMD_DISASSOCIATE = 0x28 + NL80211_CMD_DISCONNECT = 0x30 + NL80211_CMD_EXTERNAL_AUTH = 0x7f + NL80211_CMD_FLUSH_PMKSA = 0x36 + NL80211_CMD_FRAME = 0x3b + NL80211_CMD_FRAME_TX_STATUS = 0x3c + NL80211_CMD_FRAME_WAIT_CANCEL = 0x43 + NL80211_CMD_FT_EVENT = 
0x61 + NL80211_CMD_GET_BEACON = 0xd + NL80211_CMD_GET_COALESCE = 0x64 + NL80211_CMD_GET_FTM_RESPONDER_STATS = 0x82 + NL80211_CMD_GET_INTERFACE = 0x5 + NL80211_CMD_GET_KEY = 0x9 + NL80211_CMD_GET_MESH_CONFIG = 0x1c + NL80211_CMD_GET_MESH_PARAMS = 0x1c + NL80211_CMD_GET_MPATH = 0x15 + NL80211_CMD_GET_MPP = 0x6b + NL80211_CMD_GET_POWER_SAVE = 0x3e + NL80211_CMD_GET_PROTOCOL_FEATURES = 0x5f + NL80211_CMD_GET_REG = 0x1f + NL80211_CMD_GET_SCAN = 0x20 + NL80211_CMD_GET_STATION = 0x11 + NL80211_CMD_GET_SURVEY = 0x32 + NL80211_CMD_GET_WIPHY = 0x1 + NL80211_CMD_GET_WOWLAN = 0x49 + NL80211_CMD_JOIN_IBSS = 0x2b + NL80211_CMD_JOIN_MESH = 0x44 + NL80211_CMD_JOIN_OCB = 0x6c + NL80211_CMD_LEAVE_IBSS = 0x2c + NL80211_CMD_LEAVE_MESH = 0x45 + NL80211_CMD_LEAVE_OCB = 0x6d + NL80211_CMD_MAX = 0x93 + NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_NAN_MATCH = 0x78 + NL80211_CMD_NEW_BEACON = 0xf + NL80211_CMD_NEW_INTERFACE = 0x7 + NL80211_CMD_NEW_KEY = 0xb + NL80211_CMD_NEW_MPATH = 0x17 + NL80211_CMD_NEW_PEER_CANDIDATE = 0x48 + NL80211_CMD_NEW_SCAN_RESULTS = 0x22 + NL80211_CMD_NEW_STATION = 0x13 + NL80211_CMD_NEW_SURVEY_RESULTS = 0x33 + NL80211_CMD_NEW_WIPHY = 0x3 + NL80211_CMD_NOTIFY_CQM = 0x40 + NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 + NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 + NL80211_CMD_PEER_MEASUREMENT_START = 0x83 + NL80211_CMD_PMKSA_CANDIDATE = 0x50 + NL80211_CMD_PORT_AUTHORIZED = 0x7d + NL80211_CMD_PROBE_CLIENT = 0x54 + NL80211_CMD_PROBE_MESH_LINK = 0x88 + NL80211_CMD_RADAR_DETECT = 0x5e + NL80211_CMD_REG_BEACON_HINT = 0x2a + NL80211_CMD_REG_CHANGE = 0x24 + NL80211_CMD_REGISTER_ACTION = 0x3a + NL80211_CMD_REGISTER_BEACONS = 0x55 + NL80211_CMD_REGISTER_FRAME = 0x3a + NL80211_CMD_RELOAD_REGDB = 0x7e + NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REQ_SET_REG = 0x1b + NL80211_CMD_ROAM = 0x2f + NL80211_CMD_SCAN_ABORTED = 0x23 + NL80211_CMD_SCHED_SCAN_RESULTS = 0x4d + NL80211_CMD_SCHED_SCAN_STOPPED = 0x4e + NL80211_CMD_SET_BEACON = 0xe + NL80211_CMD_SET_BSS = 0x19 + NL80211_CMD_SET_CHANNEL = 0x41 + NL80211_CMD_SET_COALESCE = 0x65 + NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_INTERFACE = 0x6 + NL80211_CMD_SET_KEY = 0xa + NL80211_CMD_SET_MAC_ACL = 0x5d + NL80211_CMD_SET_MCAST_RATE = 0x5c + NL80211_CMD_SET_MESH_CONFIG = 0x1d + NL80211_CMD_SET_MESH_PARAMS = 0x1d + NL80211_CMD_SET_MGMT_EXTRA_IE = 0x1e + NL80211_CMD_SET_MPATH = 0x16 + NL80211_CMD_SET_MULTICAST_TO_UNICAST = 0x79 + NL80211_CMD_SET_NOACK_MAP = 0x57 + NL80211_CMD_SET_PMK = 0x7b + NL80211_CMD_SET_PMKSA = 0x34 + NL80211_CMD_SET_POWER_SAVE = 0x3d + NL80211_CMD_SET_QOS_MAP = 0x68 + NL80211_CMD_SET_REG = 0x1a + NL80211_CMD_SET_REKEY_OFFLOAD = 0x4f + NL80211_CMD_SET_SAR_SPECS = 0x8c + NL80211_CMD_SET_STATION = 0x12 + NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 + NL80211_CMD_SET_WDS_PEER = 0x42 + NL80211_CMD_SET_WIPHY = 0x2 + NL80211_CMD_SET_WIPHY_NETNS = 0x31 + NL80211_CMD_SET_WOWLAN = 0x4a + NL80211_CMD_STA_OPMODE_CHANGED = 0x80 + NL80211_CMD_START_AP = 0xf + NL80211_CMD_START_NAN = 0x73 + NL80211_CMD_START_P2P_DEVICE = 0x59 + NL80211_CMD_START_SCHED_SCAN = 0x4b + NL80211_CMD_STOP_AP = 0x10 + NL80211_CMD_STOP_NAN = 0x74 + NL80211_CMD_STOP_P2P_DEVICE = 0x5a + NL80211_CMD_STOP_SCHED_SCAN = 0x4c + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 0x70 + NL80211_CMD_TDLS_CHANNEL_SWITCH = 0x6f + NL80211_CMD_TDLS_MGMT = 0x52 + NL80211_CMD_TDLS_OPER = 0x51 + NL80211_CMD_TESTMODE = 0x2d + NL80211_CMD_TRIGGER_SCAN = 0x21 + NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 0x56 + 
NL80211_CMD_UNEXPECTED_FRAME = 0x53 + NL80211_CMD_UNPROT_BEACON = 0x8a + NL80211_CMD_UNPROT_DEAUTHENTICATE = 0x46 + NL80211_CMD_UNPROT_DISASSOCIATE = 0x47 + NL80211_CMD_UNSPEC = 0x0 + NL80211_CMD_UPDATE_CONNECT_PARAMS = 0x7a + NL80211_CMD_UPDATE_FT_IES = 0x60 + NL80211_CMD_UPDATE_OWE_INFO = 0x87 + NL80211_CMD_VENDOR = 0x67 + NL80211_CMD_WIPHY_REG_CHANGE = 0x71 + NL80211_COALESCE_CONDITION_MATCH = 0x0 + NL80211_COALESCE_CONDITION_NO_MATCH = 0x1 + NL80211_CONN_FAIL_BLOCKED_CLIENT = 0x1 + NL80211_CONN_FAIL_MAX_CLIENTS = 0x0 + NL80211_CQM_RSSI_BEACON_LOSS_EVENT = 0x2 + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH = 0x1 + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW = 0x0 + NL80211_CQM_TXE_MAX_INTVL = 0x708 + NL80211_CRIT_PROTO_APIPA = 0x3 + NL80211_CRIT_PROTO_DHCP = 0x1 + NL80211_CRIT_PROTO_EAPOL = 0x2 + NL80211_CRIT_PROTO_MAX_DURATION = 0x1388 + NL80211_CRIT_PROTO_UNSPEC = 0x0 + NL80211_DFS_AVAILABLE = 0x2 + NL80211_DFS_ETSI = 0x2 + NL80211_DFS_FCC = 0x1 + NL80211_DFS_JP = 0x3 + NL80211_DFS_UNAVAILABLE = 0x1 + NL80211_DFS_UNSET = 0x0 + NL80211_DFS_USABLE = 0x0 + NL80211_EDMG_BW_CONFIG_MAX = 0xf + NL80211_EDMG_BW_CONFIG_MIN = 0x4 + NL80211_EDMG_CHANNELS_MAX = 0x3c + NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EXTERNAL_AUTH_ABORT = 0x1 + NL80211_EXTERNAL_AUTH_START = 0x0 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 0x10 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 0xf + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 0x12 + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 + NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e + NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 + NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 + NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 0x1a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 0x30 + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 + NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe + NL80211_EXT_FEATURE_FILS_STA = 0x9 + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 0x18 + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 0x17 + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 0x16 + NL80211_EXT_FEATURE_MFP_OPTIONAL = 0x15 + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 0xa + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 0xb + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 0x2d + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 0x2 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RRM = 0x1 + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 + NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 0x2f + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 0x1e + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 0x1d + 
NL80211_EXT_FEATURE_SCAN_START_TIME = 0x3 + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc + NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_RTT = 0x38 + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 + NL80211_EXT_FEATURE_TXQS = 0x1c + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 + NL80211_EXT_FEATURE_VHT_IBSS = 0x0 + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 0x27 + NL80211_FEATURE_ACKTO_ESTIMATION = 0x800000 + NL80211_FEATURE_ACTIVE_MONITOR = 0x20000 + NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 0x4000 + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 0x40000 + NL80211_FEATURE_AP_SCAN = 0x100 + NL80211_FEATURE_CELL_BASE_REG_HINTS = 0x8 + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 0x80000 + NL80211_FEATURE_DYNAMIC_SMPS = 0x2000000 + NL80211_FEATURE_FULL_AP_CLIENT_STATE = 0x8000 + NL80211_FEATURE_HT_IBSS = 0x2 + NL80211_FEATURE_INACTIVITY_TIMER = 0x4 + NL80211_FEATURE_LOW_PRIORITY_SCAN = 0x40 + NL80211_FEATURE_MAC_ON_CREATE = 0x8000000 + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 0x80000000 + NL80211_FEATURE_NEED_OBSS_SCAN = 0x400 + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 0x10 + NL80211_FEATURE_P2P_GO_CTWIN = 0x800 + NL80211_FEATURE_P2P_GO_OPPPS = 0x1000 + NL80211_FEATURE_QUIET = 0x200000 + NL80211_FEATURE_SAE = 0x20 + NL80211_FEATURE_SCAN_FLUSH = 0x80 + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 0x20000000 + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 0x40000000 + NL80211_FEATURE_SK_TX_STATUS = 0x1 + NL80211_FEATURE_STATIC_SMPS = 0x1000000 + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 0x4000000 + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 0x10000000 + NL80211_FEATURE_TX_POWER_INSERTION = 0x400000 + NL80211_FEATURE_USERSPACE_MPM = 0x10000 + NL80211_FEATURE_VIF_TXPOWER = 0x200 + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 0x100000 + NL80211_FILS_DISCOVERY_ATTR_INT_MAX = 0x2 + NL80211_FILS_DISCOVERY_ATTR_INT_MIN = 0x1 + NL80211_FILS_DISCOVERY_ATTR_MAX = 0x3 + NL80211_FILS_DISCOVERY_ATTR_TMPL = 0x3 + NL80211_FILS_DISCOVERY_TMPL_MIN_LEN = 0x2a + NL80211_FREQUENCY_ATTR_16MHZ = 0x19 + NL80211_FREQUENCY_ATTR_1MHZ = 0x15 + NL80211_FREQUENCY_ATTR_2MHZ = 0x16 + NL80211_FREQUENCY_ATTR_4MHZ = 0x17 + NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 + NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 + NL80211_FREQUENCY_ATTR_DISABLED = 0x2 + NL80211_FREQUENCY_ATTR_FREQ = 0x1 + NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe + NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 + NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 + NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc + NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_HE = 0x13 + NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 + NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa + NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 + NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_OFFSET = 0x14 + NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_RADAR = 0x5 + NL80211_FREQUENCY_ATTR_WMM = 0x12 + NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 + NL80211_FTM_RESP_ATTR_ENABLED = 0x1 + NL80211_FTM_RESP_ATTR_LCI = 0x2 + NL80211_FTM_RESP_ATTR_MAX = 0x3 + NL80211_FTM_STATS_ASAP_NUM = 0x4 + NL80211_FTM_STATS_FAILED_NUM = 0x3 + NL80211_FTM_STATS_MAX = 0xa + NL80211_FTM_STATS_NON_ASAP_NUM = 0x5 + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM = 0x9 + NL80211_FTM_STATS_PAD = 
0xa + NL80211_FTM_STATS_PARTIAL_NUM = 0x2 + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM = 0x8 + NL80211_FTM_STATS_SUCCESS_NUM = 0x1 + NL80211_FTM_STATS_TOTAL_DURATION_MSEC = 0x6 + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM = 0x7 + NL80211_GENL_NAME = "nl80211" + NL80211_HE_BSS_COLOR_ATTR_COLOR = 0x1 + NL80211_HE_BSS_COLOR_ATTR_DISABLED = 0x2 + NL80211_HE_BSS_COLOR_ATTR_MAX = 0x3 + NL80211_HE_BSS_COLOR_ATTR_PARTIAL = 0x3 + NL80211_HE_MAX_CAPABILITY_LEN = 0x36 + NL80211_HE_MIN_CAPABILITY_LEN = 0x10 + NL80211_HE_NSS_MAX = 0x8 + NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP = 0x4 + NL80211_HE_OBSS_PD_ATTR_MAX = 0x6 + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET = 0x2 + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET = 0x1 + NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET = 0x3 + NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP = 0x5 + NL80211_HE_OBSS_PD_ATTR_SR_CTRL = 0x6 + NL80211_HIDDEN_SSID_NOT_IN_USE = 0x0 + NL80211_HIDDEN_SSID_ZERO_CONTENTS = 0x2 + NL80211_HIDDEN_SSID_ZERO_LEN = 0x1 + NL80211_HT_CAPABILITY_LEN = 0x1a + NL80211_IFACE_COMB_BI_MIN_GCD = 0x7 + NL80211_IFACE_COMB_LIMITS = 0x1 + NL80211_IFACE_COMB_MAXNUM = 0x2 + NL80211_IFACE_COMB_NUM_CHANNELS = 0x4 + NL80211_IFACE_COMB_RADAR_DETECT_REGIONS = 0x6 + NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS = 0x5 + NL80211_IFACE_COMB_STA_AP_BI_MATCH = 0x3 + NL80211_IFACE_COMB_UNSPEC = 0x0 + NL80211_IFACE_LIMIT_MAX = 0x1 + NL80211_IFACE_LIMIT_TYPES = 0x2 + NL80211_IFACE_LIMIT_UNSPEC = 0x0 + NL80211_IFTYPE_ADHOC = 0x1 + NL80211_IFTYPE_AKM_ATTR_IFTYPES = 0x1 + NL80211_IFTYPE_AKM_ATTR_MAX = 0x2 + NL80211_IFTYPE_AKM_ATTR_SUITES = 0x2 + NL80211_IFTYPE_AP = 0x3 + NL80211_IFTYPE_AP_VLAN = 0x4 + NL80211_IFTYPE_MAX = 0xc + NL80211_IFTYPE_MESH_POINT = 0x7 + NL80211_IFTYPE_MONITOR = 0x6 + NL80211_IFTYPE_NAN = 0xc + NL80211_IFTYPE_OCB = 0xb + NL80211_IFTYPE_P2P_CLIENT = 0x8 + NL80211_IFTYPE_P2P_DEVICE = 0xa + NL80211_IFTYPE_P2P_GO = 0x9 + NL80211_IFTYPE_STATION = 0x2 + NL80211_IFTYPE_UNSPECIFIED = 0x0 + NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN = 0x18 + NL80211_KCK_LEN = 0x10 + NL80211_KEK_EXT_LEN = 0x20 + NL80211_KEK_LEN = 0x10 + NL80211_KEY_CIPHER = 0x3 + NL80211_KEY_DATA = 0x1 + NL80211_KEY_DEFAULT_BEACON = 0xa + NL80211_KEY_DEFAULT = 0x5 + NL80211_KEY_DEFAULT_MGMT = 0x6 + NL80211_KEY_DEFAULT_TYPE_MULTICAST = 0x2 + NL80211_KEY_DEFAULT_TYPES = 0x8 + NL80211_KEY_DEFAULT_TYPE_UNICAST = 0x1 + NL80211_KEY_IDX = 0x2 + NL80211_KEY_MAX = 0xa + NL80211_KEY_MODE = 0x9 + NL80211_KEY_NO_TX = 0x1 + NL80211_KEY_RX_TX = 0x0 + NL80211_KEY_SEQ = 0x4 + NL80211_KEY_SET_TX = 0x2 + NL80211_KEY_TYPE = 0x7 + NL80211_KEYTYPE_GROUP = 0x0 + NL80211_KEYTYPE_PAIRWISE = 0x1 + NL80211_KEYTYPE_PEERKEY = 0x2 + NL80211_MAX_NR_AKM_SUITES = 0x2 + NL80211_MAX_NR_CIPHER_SUITES = 0x5 + NL80211_MAX_SUPP_HT_RATES = 0x4d + NL80211_MAX_SUPP_RATES = 0x20 + NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MESHCONF_ATTR_MAX = 0x1f + NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 + NL80211_MESHCONF_AWAKE_WINDOW = 0x1b + NL80211_MESHCONF_CONFIRM_TIMEOUT = 0x2 + NL80211_MESHCONF_CONNECTED_TO_AS = 0x1f + NL80211_MESHCONF_CONNECTED_TO_GATE = 0x1d + NL80211_MESHCONF_ELEMENT_TTL = 0xf + NL80211_MESHCONF_FORWARDING = 0x13 + NL80211_MESHCONF_GATE_ANNOUNCEMENTS = 0x11 + NL80211_MESHCONF_HOLDING_TIMEOUT = 0x3 + NL80211_MESHCONF_HT_OPMODE = 0x16 + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT = 0xb + NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL = 0x19 + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES = 0x8 + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME = 0xd + NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT = 0x17 + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL = 0x12 + 
NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL = 0xc + NL80211_MESHCONF_HWMP_RANN_INTERVAL = 0x10 + NL80211_MESHCONF_HWMP_ROOT_INTERVAL = 0x18 + NL80211_MESHCONF_HWMP_ROOTMODE = 0xe + NL80211_MESHCONF_MAX_PEER_LINKS = 0x4 + NL80211_MESHCONF_MAX_RETRIES = 0x5 + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT = 0xa + NL80211_MESHCONF_NOLEARN = 0x1e + NL80211_MESHCONF_PATH_REFRESH_TIME = 0x9 + NL80211_MESHCONF_PLINK_TIMEOUT = 0x1c + NL80211_MESHCONF_POWER_MODE = 0x1a + NL80211_MESHCONF_RETRY_TIMEOUT = 0x1 + NL80211_MESHCONF_RSSI_THRESHOLD = 0x14 + NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR = 0x15 + NL80211_MESHCONF_TTL = 0x6 + NL80211_MESH_POWER_ACTIVE = 0x1 + NL80211_MESH_POWER_DEEP_SLEEP = 0x3 + NL80211_MESH_POWER_LIGHT_SLEEP = 0x2 + NL80211_MESH_POWER_MAX = 0x3 + NL80211_MESH_POWER_UNKNOWN = 0x0 + NL80211_MESH_SETUP_ATTR_MAX = 0x8 + NL80211_MESH_SETUP_AUTH_PROTOCOL = 0x8 + NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC = 0x2 + NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL = 0x1 + NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC = 0x6 + NL80211_MESH_SETUP_IE = 0x3 + NL80211_MESH_SETUP_USERSPACE_AMPE = 0x5 + NL80211_MESH_SETUP_USERSPACE_AUTH = 0x4 + NL80211_MESH_SETUP_USERSPACE_MPM = 0x7 + NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE = 0x3 + NL80211_MFP_NO = 0x0 + NL80211_MFP_OPTIONAL = 0x2 + NL80211_MFP_REQUIRED = 0x1 + NL80211_MIN_REMAIN_ON_CHANNEL_TIME = 0xa + NL80211_MNTR_FLAG_ACTIVE = 0x6 + NL80211_MNTR_FLAG_CONTROL = 0x3 + NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 + NL80211_MNTR_FLAG_FCSFAIL = 0x1 + NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_OTHER_BSS = 0x4 + NL80211_MNTR_FLAG_PLCPFAIL = 0x2 + NL80211_MPATH_FLAG_ACTIVE = 0x1 + NL80211_MPATH_FLAG_FIXED = 0x8 + NL80211_MPATH_FLAG_RESOLVED = 0x10 + NL80211_MPATH_FLAG_RESOLVING = 0x2 + NL80211_MPATH_FLAG_SN_VALID = 0x4 + NL80211_MPATH_INFO_DISCOVERY_RETRIES = 0x7 + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT = 0x6 + NL80211_MPATH_INFO_EXPTIME = 0x4 + NL80211_MPATH_INFO_FLAGS = 0x5 + NL80211_MPATH_INFO_FRAME_QLEN = 0x1 + NL80211_MPATH_INFO_HOP_COUNT = 0x8 + NL80211_MPATH_INFO_MAX = 0x9 + NL80211_MPATH_INFO_METRIC = 0x3 + NL80211_MPATH_INFO_PATH_CHANGE = 0x9 + NL80211_MPATH_INFO_SN = 0x2 + NL80211_MULTICAST_GROUP_CONFIG = "config" + NL80211_MULTICAST_GROUP_MLME = "mlme" + NL80211_MULTICAST_GROUP_NAN = "nan" + NL80211_MULTICAST_GROUP_REG = "regulatory" + NL80211_MULTICAST_GROUP_SCAN = "scan" + NL80211_MULTICAST_GROUP_TESTMODE = "testmode" + NL80211_MULTICAST_GROUP_VENDOR = "vendor" + NL80211_NAN_FUNC_ATTR_MAX = 0x10 + NL80211_NAN_FUNC_CLOSE_RANGE = 0x9 + NL80211_NAN_FUNC_FOLLOW_UP = 0x2 + NL80211_NAN_FUNC_FOLLOW_UP_DEST = 0x8 + NL80211_NAN_FUNC_FOLLOW_UP_ID = 0x6 + NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID = 0x7 + NL80211_NAN_FUNC_INSTANCE_ID = 0xf + NL80211_NAN_FUNC_MAX_TYPE = 0x2 + NL80211_NAN_FUNC_PUBLISH_BCAST = 0x4 + NL80211_NAN_FUNC_PUBLISH = 0x0 + NL80211_NAN_FUNC_PUBLISH_TYPE = 0x3 + NL80211_NAN_FUNC_RX_MATCH_FILTER = 0xd + NL80211_NAN_FUNC_SERVICE_ID = 0x2 + NL80211_NAN_FUNC_SERVICE_ID_LEN = 0x6 + NL80211_NAN_FUNC_SERVICE_INFO = 0xb + NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN = 0xff + NL80211_NAN_FUNC_SRF = 0xc + NL80211_NAN_FUNC_SRF_MAX_LEN = 0xff + NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE = 0x5 + NL80211_NAN_FUNC_SUBSCRIBE = 0x1 + NL80211_NAN_FUNC_TERM_REASON = 0x10 + NL80211_NAN_FUNC_TERM_REASON_ERROR = 0x2 + NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED = 0x1 + NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST = 0x0 + NL80211_NAN_FUNC_TTL = 0xa + NL80211_NAN_FUNC_TX_MATCH_FILTER = 0xe + NL80211_NAN_FUNC_TYPE = 0x1 + NL80211_NAN_MATCH_ATTR_MAX = 0x2 + NL80211_NAN_MATCH_FUNC_LOCAL = 0x1 + 
NL80211_NAN_MATCH_FUNC_PEER = 0x2 + NL80211_NAN_SOLICITED_PUBLISH = 0x1 + NL80211_NAN_SRF_ATTR_MAX = 0x4 + NL80211_NAN_SRF_BF = 0x2 + NL80211_NAN_SRF_BF_IDX = 0x3 + NL80211_NAN_SRF_INCLUDE = 0x1 + NL80211_NAN_SRF_MAC_ADDRS = 0x4 + NL80211_NAN_UNSOLICITED_PUBLISH = 0x2 + NL80211_NUM_ACS = 0x4 + NL80211_P2P_PS_SUPPORTED = 0x1 + NL80211_P2P_PS_UNSUPPORTED = 0x0 + NL80211_PKTPAT_MASK = 0x1 + NL80211_PKTPAT_OFFSET = 0x3 + NL80211_PKTPAT_PATTERN = 0x2 + NL80211_PLINK_ACTION_BLOCK = 0x2 + NL80211_PLINK_ACTION_NO_ACTION = 0x0 + NL80211_PLINK_ACTION_OPEN = 0x1 + NL80211_PLINK_BLOCKED = 0x6 + NL80211_PLINK_CNF_RCVD = 0x3 + NL80211_PLINK_ESTAB = 0x4 + NL80211_PLINK_HOLDING = 0x5 + NL80211_PLINK_LISTEN = 0x0 + NL80211_PLINK_OPN_RCVD = 0x2 + NL80211_PLINK_OPN_SNT = 0x1 + NL80211_PMKSA_CANDIDATE_BSSID = 0x2 + NL80211_PMKSA_CANDIDATE_INDEX = 0x1 + NL80211_PMKSA_CANDIDATE_PREAUTH = 0x3 + NL80211_PMSR_ATTR_MAX = 0x5 + NL80211_PMSR_ATTR_MAX_PEERS = 0x1 + NL80211_PMSR_ATTR_PEERS = 0x5 + NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR = 0x3 + NL80211_PMSR_ATTR_REPORT_AP_TSF = 0x2 + NL80211_PMSR_ATTR_TYPE_CAPA = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS = 0x6 + NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT = 0x7 + NL80211_PMSR_FTM_CAPA_ATTR_MAX = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP = 0x2 + NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES = 0x5 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI = 0x3 + NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED = 0x9 + NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS = 0x7 + NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP = 0x5 + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE = 0x1 + NL80211_PMSR_FTM_FAILURE_PEER_BUSY = 0x6 + NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE = 0x4 + NL80211_PMSR_FTM_FAILURE_REJECTED = 0x2 + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 + NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 + NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 + NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 + NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK = 0xc + NL80211_PMSR_FTM_REQ_ATTR_MAX = 0xd + NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED = 0xb + NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES = 0x7 + NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE = 0x2 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC = 0x9 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI = 0x8 + NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION = 0x7 + NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX = 0x2 + NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME = 0x5 + NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC = 0x14 + NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG = 0x10 + NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD = 0x12 + NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE = 0x11 + NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON = 0x1 + NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_RESP_ATTR_LCI = 0x13 + NL80211_PMSR_FTM_RESP_ATTR_MAX = 0x15 + NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP = 0x6 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS = 0x3 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES = 0x4 + NL80211_PMSR_FTM_RESP_ATTR_PAD = 0x15 + NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG = 0x9 + NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD = 0xa + NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG = 0xd + NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD = 0xf + NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE = 0xe + 
NL80211_PMSR_FTM_RESP_ATTR_RX_RATE = 0xc + NL80211_PMSR_FTM_RESP_ATTR_TX_RATE = 0xb + NL80211_PMSR_PEER_ATTR_ADDR = 0x1 + NL80211_PMSR_PEER_ATTR_CHAN = 0x2 + NL80211_PMSR_PEER_ATTR_MAX = 0x4 + NL80211_PMSR_PEER_ATTR_REQ = 0x3 + NL80211_PMSR_PEER_ATTR_RESP = 0x4 + NL80211_PMSR_REQ_ATTR_DATA = 0x1 + NL80211_PMSR_REQ_ATTR_GET_AP_TSF = 0x2 + NL80211_PMSR_REQ_ATTR_MAX = 0x2 + NL80211_PMSR_RESP_ATTR_AP_TSF = 0x4 + NL80211_PMSR_RESP_ATTR_DATA = 0x1 + NL80211_PMSR_RESP_ATTR_FINAL = 0x5 + NL80211_PMSR_RESP_ATTR_HOST_TIME = 0x3 + NL80211_PMSR_RESP_ATTR_MAX = 0x6 + NL80211_PMSR_RESP_ATTR_PAD = 0x6 + NL80211_PMSR_RESP_ATTR_STATUS = 0x2 + NL80211_PMSR_STATUS_FAILURE = 0x3 + NL80211_PMSR_STATUS_REFUSED = 0x1 + NL80211_PMSR_STATUS_SUCCESS = 0x0 + NL80211_PMSR_STATUS_TIMEOUT = 0x2 + NL80211_PMSR_TYPE_FTM = 0x1 + NL80211_PMSR_TYPE_INVALID = 0x0 + NL80211_PMSR_TYPE_MAX = 0x1 + NL80211_PREAMBLE_DMG = 0x3 + NL80211_PREAMBLE_HE = 0x4 + NL80211_PREAMBLE_HT = 0x1 + NL80211_PREAMBLE_LEGACY = 0x0 + NL80211_PREAMBLE_VHT = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 0x8 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 0x4 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 0x1 + NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 0x1 + NL80211_PS_DISABLED = 0x0 + NL80211_PS_ENABLED = 0x1 + NL80211_RADAR_CAC_ABORTED = 0x2 + NL80211_RADAR_CAC_FINISHED = 0x1 + NL80211_RADAR_CAC_STARTED = 0x5 + NL80211_RADAR_DETECTED = 0x0 + NL80211_RADAR_NOP_FINISHED = 0x3 + NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 + NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb + NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc + NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 + NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_BITRATE32 = 0x5 + NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_HE_1XLTF = 0x0 + NL80211_RATE_INFO_HE_2XLTF = 0x1 + NL80211_RATE_INFO_HE_4XLTF = 0x2 + NL80211_RATE_INFO_HE_DCM = 0x10 + NL80211_RATE_INFO_HE_GI_0_8 = 0x0 + NL80211_RATE_INFO_HE_GI_1_6 = 0x1 + NL80211_RATE_INFO_HE_GI_3_2 = 0x2 + NL80211_RATE_INFO_HE_GI = 0xf + NL80211_RATE_INFO_HE_MCS = 0xd + NL80211_RATE_INFO_HE_NSS = 0xe + NL80211_RATE_INFO_HE_RU_ALLOC_106 = 0x2 + NL80211_RATE_INFO_HE_RU_ALLOC_242 = 0x3 + NL80211_RATE_INFO_HE_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_HE_RU_ALLOC_2x996 = 0x6 + NL80211_RATE_INFO_HE_RU_ALLOC_484 = 0x4 + NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 + NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 + NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_SHORT_GI = 0x4 + NL80211_RATE_INFO_VHT_MCS = 0x6 + NL80211_RATE_INFO_VHT_NSS = 0x7 + NL80211_REGDOM_SET_BY_CORE = 0x0 + NL80211_REGDOM_SET_BY_COUNTRY_IE = 0x3 + NL80211_REGDOM_SET_BY_DRIVER = 0x2 + NL80211_REGDOM_SET_BY_USER = 0x1 + NL80211_REGDOM_TYPE_COUNTRY = 0x0 + NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 + NL80211_REGDOM_TYPE_INTERSECTION = 0x3 + NL80211_REGDOM_TYPE_WORLD = 0x1 + NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REKEY_DATA_AKM = 0x4 + NL80211_REKEY_DATA_KCK = 0x2 + NL80211_REKEY_DATA_KEK = 0x1 + NL80211_REKEY_DATA_REPLAY_CTR = 0x3 + NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_AUTO_BW = 0x800 + NL80211_RRF_DFS = 0x10 + NL80211_RRF_GO_CONCURRENT = 0x1000 + NL80211_RRF_IR_CONCURRENT = 0x1000 + NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_80MHZ = 0x8000 + NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_HE = 0x20000 + NL80211_RRF_NO_HT40 = 0x6000 + NL80211_RRF_NO_HT40MINUS = 0x2000 + NL80211_RRF_NO_HT40PLUS = 0x4000 + NL80211_RRF_NO_IBSS = 0x80 + 
NL80211_RRF_NO_INDOOR = 0x4 + NL80211_RRF_NO_IR_ALL = 0x180 + NL80211_RRF_NO_IR = 0x80 + NL80211_RRF_NO_OFDM = 0x1 + NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PTMP_ONLY = 0x40 + NL80211_RRF_PTP_ONLY = 0x20 + NL80211_RXMGMT_FLAG_ANSWERED = 0x1 + NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 0x2 + NL80211_SAE_PWE_BOTH = 0x3 + NL80211_SAE_PWE_HASH_TO_ELEMENT = 0x2 + NL80211_SAE_PWE_HUNT_AND_PECK = 0x1 + NL80211_SAE_PWE_UNSPECIFIED = 0x0 + NL80211_SAR_ATTR_MAX = 0x2 + NL80211_SAR_ATTR_SPECS = 0x2 + NL80211_SAR_ATTR_SPECS_END_FREQ = 0x4 + NL80211_SAR_ATTR_SPECS_MAX = 0x4 + NL80211_SAR_ATTR_SPECS_POWER = 0x1 + NL80211_SAR_ATTR_SPECS_RANGE_INDEX = 0x2 + NL80211_SAR_ATTR_SPECS_START_FREQ = 0x3 + NL80211_SAR_ATTR_TYPE = 0x1 + NL80211_SAR_TYPE_POWER = 0x0 + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 0x20 + NL80211_SCAN_FLAG_AP = 0x4 + NL80211_SCAN_FLAG_COLOCATED_6GHZ = 0x4000 + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 0x10 + NL80211_SCAN_FLAG_FLUSH = 0x2 + NL80211_SCAN_FLAG_FREQ_KHZ = 0x2000 + NL80211_SCAN_FLAG_HIGH_ACCURACY = 0x400 + NL80211_SCAN_FLAG_LOW_POWER = 0x200 + NL80211_SCAN_FLAG_LOW_PRIORITY = 0x1 + NL80211_SCAN_FLAG_LOW_SPAN = 0x100 + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 0x1000 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x80 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 0x40 + NL80211_SCAN_FLAG_RANDOM_ADDR = 0x8 + NL80211_SCAN_FLAG_RANDOM_SN = 0x800 + NL80211_SCAN_RSSI_THOLD_OFF = -0x12c + NL80211_SCHED_SCAN_MATCH_ATTR_BSSID = 0x5 + NL80211_SCHED_SCAN_MATCH_ATTR_MAX = 0x6 + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI = 0x3 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST = 0x4 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI = 0x2 + NL80211_SCHED_SCAN_MATCH_ATTR_SSID = 0x1 + NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI = 0x6 + NL80211_SCHED_SCAN_PLAN_INTERVAL = 0x1 + NL80211_SCHED_SCAN_PLAN_ITERATIONS = 0x2 + NL80211_SCHED_SCAN_PLAN_MAX = 0x2 + NL80211_SMPS_DYNAMIC = 0x2 + NL80211_SMPS_MAX = 0x2 + NL80211_SMPS_OFF = 0x0 + NL80211_SMPS_STATIC = 0x1 + NL80211_STA_BSS_PARAM_BEACON_INTERVAL = 0x5 + NL80211_STA_BSS_PARAM_CTS_PROT = 0x1 + NL80211_STA_BSS_PARAM_DTIM_PERIOD = 0x4 + NL80211_STA_BSS_PARAM_MAX = 0x5 + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE = 0x2 + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME = 0x3 + NL80211_STA_FLAG_ASSOCIATED = 0x7 + NL80211_STA_FLAG_AUTHENTICATED = 0x5 + NL80211_STA_FLAG_AUTHORIZED = 0x1 + NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX_OLD_API = 0x6 + NL80211_STA_FLAG_MFP = 0x4 + NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_TDLS_PEER = 0x6 + NL80211_STA_FLAG_WME = 0x3 + NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_ACK_SIGNAL = 0x22 + NL80211_STA_INFO_AIRTIME_LINK_METRIC = 0x29 + NL80211_STA_INFO_AIRTIME_WEIGHT = 0x28 + NL80211_STA_INFO_ASSOC_AT_BOOTTIME = 0x2a + NL80211_STA_INFO_BEACON_LOSS = 0x12 + NL80211_STA_INFO_BEACON_RX = 0x1d + NL80211_STA_INFO_BEACON_SIGNAL_AVG = 0x1e + NL80211_STA_INFO_BSS_PARAM = 0xf + NL80211_STA_INFO_CHAIN_SIGNAL_AVG = 0x1a + NL80211_STA_INFO_CHAIN_SIGNAL = 0x19 + NL80211_STA_INFO_CONNECTED_TIME = 0x10 + NL80211_STA_INFO_CONNECTED_TO_AS = 0x2b + NL80211_STA_INFO_CONNECTED_TO_GATE = 0x26 + NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_EXPECTED_THROUGHPUT = 0x1b + NL80211_STA_INFO_FCS_ERROR_COUNT = 0x25 + NL80211_STA_INFO_INACTIVE_TIME = 0x1 + NL80211_STA_INFO_LLID = 0x4 + NL80211_STA_INFO_LOCAL_PM = 0x14 + NL80211_STA_INFO_MAX = 0x2b + NL80211_STA_INFO_NONPEER_PM = 0x16 + NL80211_STA_INFO_PAD = 0x21 + NL80211_STA_INFO_PEER_PM = 0x15 + NL80211_STA_INFO_PLID = 0x5 + 
NL80211_STA_INFO_PLINK_STATE = 0x6 + NL80211_STA_INFO_RX_BITRATE = 0xe + NL80211_STA_INFO_RX_BYTES64 = 0x17 + NL80211_STA_INFO_RX_BYTES = 0x2 + NL80211_STA_INFO_RX_DROP_MISC = 0x1c + NL80211_STA_INFO_RX_DURATION = 0x20 + NL80211_STA_INFO_RX_MPDUS = 0x24 + NL80211_STA_INFO_RX_PACKETS = 0x9 + NL80211_STA_INFO_SIGNAL_AVG = 0xd + NL80211_STA_INFO_SIGNAL = 0x7 + NL80211_STA_INFO_STA_FLAGS = 0x11 + NL80211_STA_INFO_TID_STATS = 0x1f + NL80211_STA_INFO_T_OFFSET = 0x13 + NL80211_STA_INFO_TX_BITRATE = 0x8 + NL80211_STA_INFO_TX_BYTES64 = 0x18 + NL80211_STA_INFO_TX_BYTES = 0x3 + NL80211_STA_INFO_TX_DURATION = 0x27 + NL80211_STA_INFO_TX_FAILED = 0xc + NL80211_STA_INFO_TX_PACKETS = 0xa + NL80211_STA_INFO_TX_RETRIES = 0xb + NL80211_STA_WME_MAX = 0x2 + NL80211_STA_WME_MAX_SP = 0x2 + NL80211_STA_WME_UAPSD_QUEUES = 0x1 + NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_CHANNEL_TIME = 0x4 + NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_CHANNEL_TIME_RX = 0x7 + NL80211_SURVEY_INFO_CHANNEL_TIME_TX = 0x8 + NL80211_SURVEY_INFO_FREQUENCY = 0x1 + NL80211_SURVEY_INFO_FREQUENCY_OFFSET = 0xc + NL80211_SURVEY_INFO_IN_USE = 0x3 + NL80211_SURVEY_INFO_MAX = 0xc + NL80211_SURVEY_INFO_NOISE = 0x2 + NL80211_SURVEY_INFO_PAD = 0xa + NL80211_SURVEY_INFO_TIME_BSS_RX = 0xb + NL80211_SURVEY_INFO_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_TIME = 0x4 + NL80211_SURVEY_INFO_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_TIME_RX = 0x7 + NL80211_SURVEY_INFO_TIME_SCAN = 0x9 + NL80211_SURVEY_INFO_TIME_TX = 0x8 + NL80211_TDLS_DISABLE_LINK = 0x4 + NL80211_TDLS_DISCOVERY_REQ = 0x0 + NL80211_TDLS_ENABLE_LINK = 0x3 + NL80211_TDLS_PEER_HE = 0x8 + NL80211_TDLS_PEER_HT = 0x1 + NL80211_TDLS_PEER_VHT = 0x2 + NL80211_TDLS_PEER_WMM = 0x4 + NL80211_TDLS_SETUP = 0x1 + NL80211_TDLS_TEARDOWN = 0x2 + NL80211_TID_CONFIG_ATTR_AMPDU_CTRL = 0x9 + NL80211_TID_CONFIG_ATTR_AMSDU_CTRL = 0xb + NL80211_TID_CONFIG_ATTR_MAX = 0xd + NL80211_TID_CONFIG_ATTR_NOACK = 0x6 + NL80211_TID_CONFIG_ATTR_OVERRIDE = 0x4 + NL80211_TID_CONFIG_ATTR_PAD = 0x1 + NL80211_TID_CONFIG_ATTR_PEER_SUPP = 0x3 + NL80211_TID_CONFIG_ATTR_RETRY_LONG = 0x8 + NL80211_TID_CONFIG_ATTR_RETRY_SHORT = 0x7 + NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL = 0xa + NL80211_TID_CONFIG_ATTR_TIDS = 0x5 + NL80211_TID_CONFIG_ATTR_TX_RATE = 0xd + NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE = 0xc + NL80211_TID_CONFIG_ATTR_VIF_SUPP = 0x2 + NL80211_TID_CONFIG_DISABLE = 0x1 + NL80211_TID_CONFIG_ENABLE = 0x0 + NL80211_TID_STATS_MAX = 0x6 + NL80211_TID_STATS_PAD = 0x5 + NL80211_TID_STATS_RX_MSDU = 0x1 + NL80211_TID_STATS_TX_MSDU = 0x2 + NL80211_TID_STATS_TX_MSDU_FAILED = 0x4 + NL80211_TID_STATS_TX_MSDU_RETRIES = 0x3 + NL80211_TID_STATS_TXQ_STATS = 0x6 + NL80211_TIMEOUT_ASSOC = 0x3 + NL80211_TIMEOUT_AUTH = 0x2 + NL80211_TIMEOUT_SCAN = 0x1 + NL80211_TIMEOUT_UNSPECIFIED = 0x0 + NL80211_TKIP_DATA_OFFSET_ENCR_KEY = 0x0 + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY = 0x18 + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY = 0x10 + NL80211_TX_POWER_AUTOMATIC = 0x0 + NL80211_TX_POWER_FIXED = 0x2 + NL80211_TX_POWER_LIMITED = 0x1 + NL80211_TXQ_ATTR_AC = 0x1 + NL80211_TXQ_ATTR_AIFS = 0x5 + NL80211_TXQ_ATTR_CWMAX = 0x4 + NL80211_TXQ_ATTR_CWMIN = 0x3 + NL80211_TXQ_ATTR_MAX = 0x5 + NL80211_TXQ_ATTR_QUEUE = 0x1 + NL80211_TXQ_ATTR_TXOP = 0x2 + NL80211_TXQ_Q_BE = 0x2 + NL80211_TXQ_Q_BK = 0x3 + NL80211_TXQ_Q_VI = 0x1 + NL80211_TXQ_Q_VO = 0x0 + NL80211_TXQ_STATS_BACKLOG_BYTES = 0x1 + NL80211_TXQ_STATS_BACKLOG_PACKETS = 0x2 + NL80211_TXQ_STATS_COLLISIONS = 0x8 + NL80211_TXQ_STATS_DROPS = 0x4 + NL80211_TXQ_STATS_ECN_MARKS = 0x5 + 
NL80211_TXQ_STATS_FLOWS = 0x3 + NL80211_TXQ_STATS_MAX = 0xb + NL80211_TXQ_STATS_MAX_FLOWS = 0xb + NL80211_TXQ_STATS_OVERLIMIT = 0x6 + NL80211_TXQ_STATS_OVERMEMORY = 0x7 + NL80211_TXQ_STATS_TX_BYTES = 0x9 + NL80211_TXQ_STATS_TX_PACKETS = 0xa + NL80211_TX_RATE_AUTOMATIC = 0x0 + NL80211_TXRATE_DEFAULT_GI = 0x0 + NL80211_TX_RATE_FIXED = 0x2 + NL80211_TXRATE_FORCE_LGI = 0x2 + NL80211_TXRATE_FORCE_SGI = 0x1 + NL80211_TXRATE_GI = 0x4 + NL80211_TXRATE_HE = 0x5 + NL80211_TXRATE_HE_GI = 0x6 + NL80211_TXRATE_HE_LTF = 0x7 + NL80211_TXRATE_HT = 0x2 + NL80211_TXRATE_LEGACY = 0x1 + NL80211_TX_RATE_LIMITED = 0x1 + NL80211_TXRATE_MAX = 0x7 + NL80211_TXRATE_MCS = 0x2 + NL80211_TXRATE_VHT = 0x3 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT = 0x1 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX = 0x2 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL = 0x2 + NL80211_USER_REG_HINT_CELL_BASE = 0x1 + NL80211_USER_REG_HINT_INDOOR = 0x2 + NL80211_USER_REG_HINT_USER = 0x0 + NL80211_VENDOR_ID_IS_LINUX = 0x80000000 + NL80211_VHT_CAPABILITY_LEN = 0xc + NL80211_VHT_NSS_MAX = 0x8 + NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WMMR_AIFSN = 0x3 + NL80211_WMMR_CW_MAX = 0x2 + NL80211_WMMR_CW_MIN = 0x1 + NL80211_WMMR_MAX = 0x4 + NL80211_WMMR_TXOP = 0x4 + NL80211_WOWLAN_PKTPAT_MASK = 0x1 + NL80211_WOWLAN_PKTPAT_OFFSET = 0x3 + NL80211_WOWLAN_PKTPAT_PATTERN = 0x2 + NL80211_WOWLAN_TCP_DATA_INTERVAL = 0x9 + NL80211_WOWLAN_TCP_DATA_PAYLOAD = 0x6 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ = 0x7 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN = 0x8 + NL80211_WOWLAN_TCP_DST_IPV4 = 0x2 + NL80211_WOWLAN_TCP_DST_MAC = 0x3 + NL80211_WOWLAN_TCP_DST_PORT = 0x5 + NL80211_WOWLAN_TCP_SRC_IPV4 = 0x1 + NL80211_WOWLAN_TCP_SRC_PORT = 0x4 + NL80211_WOWLAN_TCP_WAKE_MASK = 0xb + NL80211_WOWLAN_TCP_WAKE_PAYLOAD = 0xa + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE = 0x8 + NL80211_WOWLAN_TRIG_ANY = 0x1 + NL80211_WOWLAN_TRIG_DISCONNECT = 0x2 + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST = 0x7 + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE = 0x6 + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED = 0x5 + NL80211_WOWLAN_TRIG_MAGIC_PKT = 0x3 + NL80211_WOWLAN_TRIG_NET_DETECT = 0x12 + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS = 0x13 + NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 + NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 + NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN = 0xd + NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST = 0x10 + NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH = 0xf + NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS = 0x11 + NL80211_WPA_VERSION_1 = 0x1 + NL80211_WPA_VERSION_2 = 0x2 + NL80211_WPA_VERSION_3 = 0x4 +) + +const ( + FRA_UNSPEC = 0x0 + FRA_DST = 0x1 + FRA_SRC = 0x2 + FRA_IIFNAME = 0x3 + FRA_GOTO = 0x4 + FRA_UNUSED2 = 0x5 + FRA_PRIORITY = 0x6 + FRA_UNUSED3 = 0x7 + FRA_UNUSED4 = 0x8 + FRA_UNUSED5 = 0x9 + FRA_FWMARK = 0xa + FRA_FLOW = 0xb + FRA_TUN_ID = 0xc + FRA_SUPPRESS_IFGROUP = 0xd + FRA_SUPPRESS_PREFIXLEN = 0xe + FRA_TABLE = 0xf + FRA_FWMASK = 0x10 + FRA_OIFNAME = 0x11 + FRA_PAD = 0x12 + FRA_L3MDEV = 0x13 + FRA_UID_RANGE = 0x14 + FRA_PROTOCOL = 0x15 + FRA_IP_PROTO = 0x16 + FRA_SPORT_RANGE = 0x17 + FRA_DPORT_RANGE = 0x18 + FR_ACT_UNSPEC = 0x0 + FR_ACT_TO_TBL = 0x1 + FR_ACT_GOTO = 0x2 + FR_ACT_NOP = 0x3 + FR_ACT_RES3 = 0x4 + FR_ACT_RES4 = 0x5 + FR_ACT_BLACKHOLE = 0x6 + FR_ACT_UNREACHABLE = 0x7 + FR_ACT_PROHIBIT = 0x8 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 235c62e46..7551af483 
100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -170,6 +170,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 @@ -235,6 +240,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -245,6 +254,13 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -306,6 +322,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -630,3 +655,36 @@ const ( PPS_GETCAP = 0x800470a3 PPS_FETCH = 0xc00470a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ [2]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Atime_high uint32 + Dtime uint32 + Dtime_high uint32 + Ctime uint32 + Ctime_high uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 99b1e5b6a..3e738ac0b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && linux @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -250,6 +255,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -260,6 +269,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -319,6 +336,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -648,3 +673,33 @@ const ( PPS_GETCAP = 0x800870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index cc8bba791..6183eef4a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux @@ -176,6 +176,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 @@ -226,6 +231,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -236,6 +245,13 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -297,6 +313,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -625,3 +650,36 @@ const ( PPS_GETCAP = 0x800470a3 PPS_FETCH = 0xc00470a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ [2]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Atime_high uint32 + Dtime uint32 + Dtime_high uint32 + Ctime uint32 + Ctime_high uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index fa8fe3a75..968cecb17 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -229,6 +234,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -239,6 +248,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -298,6 +315,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -627,3 +652,33 @@ const ( PPS_GETCAP = 0x800870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000..8fe4c522a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,685 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len 
uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 
0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + 
Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index e7fb8d9b7..11426a301 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 @@ -231,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -241,6 +250,13 @@ type Sigset_t struct { const _C__NSIG = 0x80 +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -302,6 +318,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -631,3 +656,35 @@ const ( PPS_GETCAP = 0x400470a3 PPS_FETCH = 0xc00470a4 ) + +const ( + PIDFD_NONBLOCK = 0x80 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Dtime uint32 + Ctime uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + Atime_high uint16 + Dtime_high uint16 + Ctime_high uint16 + _ uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2fa61d593..ad1c3b3de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -232,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -242,6 +251,14 @@ type Sigset_t struct { const _C__NSIG = 0x80 +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -301,6 +318,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -630,3 +655,33 @@ const ( PPS_GETCAP = 0x400870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x80 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7f3639933..15fd84e4d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64le && linux @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -232,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -242,6 +251,14 @@ type Sigset_t struct { const _C__NSIG = 0x80 +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -301,6 +318,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -630,3 +655,33 @@ const ( PPS_GETCAP = 0x400870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x80 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index f3c20cb86..49c49825a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 @@ -231,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -241,6 +250,13 @@ type Sigset_t struct { const _C__NSIG = 0x80 +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -302,6 +318,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -631,3 +656,35 @@ const ( PPS_GETCAP = 0x400470a3 PPS_FETCH = 0xc00470a4 ) + +const ( + PIDFD_NONBLOCK = 0x80 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Dtime uint32 + Ctime uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + Atime_high uint16 + Dtime_high uint16 + Ctime_high uint16 + _ uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 885d27950..cd36d0da2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux @@ -176,6 +176,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 @@ -238,6 +243,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -248,6 +257,13 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -309,6 +325,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -637,3 +662,37 @@ const ( PPS_GETCAP = 0x400470a3 PPS_FETCH = 0xc00470a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime_high uint32 + Atime uint32 + Dtime_high uint32 + Dtime uint32 + Ctime_high uint32 + Ctime uint32 + _ uint32 + Segsz uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 + _ [4]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a94eb8e18..8c6fce039 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -239,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -249,6 +258,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -308,6 +325,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -637,3 +662,32 @@ const ( PPS_GETCAP = 0x400870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 659e32ebd..20910f2ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -239,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -249,6 +258,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -308,6 +325,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -637,3 +662,32 @@ const ( PPS_GETCAP = 0x400870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ab8ec604f..71b7b3331 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -257,6 +262,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -267,6 +276,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -326,6 +343,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -655,3 +680,33 @@ const ( PPS_GETCAP = 0x800870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3ec08237f..71184cc2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -205,8 +210,8 @@ type PtraceFpregs struct { } type PtracePer struct { - _ [0]uint64 - _ [32]byte + Control_regs [3]uint64 + _ [8]byte Starting_addr uint64 Ending_addr uint64 Perc_atmid uint16 @@ -252,6 +257,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -262,6 +271,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -321,6 +338,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -651,3 +676,32 @@ const ( PPS_GETCAP = 0x800870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ uint16 + Seq uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 23d474470..06156285d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -177,6 +177,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 @@ -234,6 +239,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x400000 +) + const ( POLLRDHUP = 0x800 ) @@ -244,6 +253,14 @@ type Sigset_t struct { const _C__NSIG = 0x41 +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -303,6 +320,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -632,3 +657,32 @@ const ( PPS_GETCAP = 0x400870a3 PPS_FETCH = 0xc00870a4 ) + +const ( + PIDFD_NONBLOCK = 0x4000 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ uint16 + Seq uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2a8b1e6f7..2ed718ca0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte Pad_cgo_0 [2]byte Mount_info [160]byte } @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b1759cf70..b4fb97ebe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -96,10 +96,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index e807de206..2c4675040 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + 
F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -565,12 +565,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ff3aecaee..ddee04514 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 9ecda6917..eb13d4e8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 85effef9c..ad4aad279 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -440,3 +440,43 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type fileObj struct { + Atim Timespec + Mtim Timespec + Ctim Timespec + Pad [3]uint64 + Name *int8 +} + +type portEvent struct { + Events int32 + Source uint16 + Pad uint16 + Object uint64 + User *byte +} + +const ( + PORT_SOURCE_AIO = 0x1 + PORT_SOURCE_TIMER = 0x2 + PORT_SOURCE_USER = 0x3 + PORT_SOURCE_FD = 0x4 + PORT_SOURCE_ALERT = 0x5 + PORT_SOURCE_MQ = 0x6 + PORT_SOURCE_FILE = 0x7 + PORT_ALERT_SET = 0x1 + PORT_ALERT_UPDATE = 0x2 + PORT_ALERT_INVALID = 0x3 + FILE_ACCESS = 0x1 + FILE_MODIFIED = 0x2 + FILE_ATTRIB = 0x4 + FILE_TRUNC = 0x100000 + FILE_NOFOLLOW = 0x10000000 + FILE_DELETE = 0x10 + FILE_RENAME_TO = 0x20 + FILE_RENAME_FROM = 0x40 + UNMOUNTED = 0x20000000 + MOUNTEDOVER = 0x40000000 + FILE_EXCEPTION = 0x60000070 +) diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index e4c62289f..8a7392c4a 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build go1.10 // +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 02b9e1e9d..bb0a92001 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d8c94e1bd..42fa8d72c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 16b11db53..56a0e1ea2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 647f2d427..baacf32b4 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index c937d0976..f248effae 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index 0ca0193eb..f517fdb20 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 26fbd55a1..f5a078827 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 2c58f09ba..cb7239c43 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 7e1ae096e..11b273300 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9ea1b4214..96a130d30 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 942906929..0175eae50 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package norm diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go index 7b045a595..b9a2a8f07 100644 --- a/vendor/google.golang.org/appengine/datastore/save.go +++ b/vendor/google.golang.org/appengine/datastore/save.go @@ -310,14 +310,14 @@ func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.Ent func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - // TODO(perfomance): Only reflect.String needed, other property types are not supported (copy/paste from json package) + // TODO(performance): Only reflect.String needed, other property types are not supported (copy/paste from json package) return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // TODO(perfomance): Uint* are unsupported property types - should be removed (copy/paste from json package) + // TODO(performance): Uint* are unsupported property types - should be removed (copy/paste from json package) return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index e79a53884..f34a38e4e 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/rpc/status.proto package status @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is // used by [gRPC](https://github.com/grpc). Each `Status` message contains diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 3e495fa23..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..52338d004 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. 
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3a..c6672c0a3 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 3f661a787..1f8960922 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,30 +16,18 @@ proto: fi go generate google.golang.org/grpc/... -test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... -testsubmodule: testdeps +testsubmodule: cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps ./vet.sh @@ -51,14 +39,8 @@ vetdeps: all \ build \ clean \ - deps \ proto \ test \ - testappengine \ - testappenginedeps \ - testdeps \ testrace \ - updatedeps \ - updatetestdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 3949a683f..0e6ae69a5 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -136,6 +136,6 @@ errors. [Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3220d87be..ae13ddac1 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,55 +25,77 @@ // later release. package attributes -import "fmt" - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. 
-func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8bf359dbf..f7a7697ca 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -75,24 +76,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. 
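A minimal sketch of the reworked attributes API from the hunk above (single-pair New/WithValue plus the new Equal method), assuming the google.golang.org/grpc/attributes package as vendored here; the regionKey type and the string values are hypothetical:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys must be hashable and should be unexported types owned by the caller,
// so attributes set by different packages cannot collide.
type regionKey struct{}

func main() {
	// Old API: attributes.New(k1, v1, k2, v2, ...) and a.WithValues(kvs...).
	// New API: exactly one key/value pair per call.
	a := attributes.New(regionKey{}, "us-east1")

	// WithValue returns a copy containing the extra pair; a is unchanged,
	// and the last value written for a key wins.
	b := a.WithValue(regionKey{}, "us-west1")

	fmt.Println(a.Value(regionKey{})) // us-east1
	fmt.Println(b.Value(regionKey{})) // us-west1

	// Equal compares the two sets; values may implement
	// Equal(o interface{}) bool to customize how they are matched.
	fmt.Println(a.Equal(b)) // false
}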
+// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -101,6 +104,9 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -143,6 +149,13 @@ type ClientConn interface { // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has // changed. @@ -162,21 +175,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. 
+ // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target } @@ -312,6 +336,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -339,8 +377,10 @@ var ErrBadResolverState = errors.New("bad resolver state") // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. } // RecordTransition records state change happening in subConn and based on that @@ -348,9 +388,11 @@ type ConnectivityStateEvaluator struct { // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. +// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure // -// Idle and Shutdown are not considered. +// Shutdown is not considered. 
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. for idx, state := range []connectivity.State{oldState, newState} { @@ -360,6 +402,10 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal } } @@ -370,5 +416,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numConnecting > 0 { return connectivity.Connecting } + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + if cse.numIdle > 0 { + return connectivity.Idle + } return connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 32d782f1c..a67074a3a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -64,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -75,7 +75,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -99,26 +99,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[a] = sc + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } } - for a, sc := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { + if _, ok := addrsSet.Get(a); !ok { b.cc.RemoveSubConn(sc) - delete(b.subConns, a) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. 
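The balancer.go hunk above teaches ConnectivityStateEvaluator to count TRANSIENT_FAILURE and IDLE SubConns and spells out the new aggregation order (Ready, then Connecting, then TransientFailure, then Idle). A small sketch of that behaviour with an invented sequence of transitions, using only the APIs visible in the hunk:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// A freshly created SubConn is recorded as Shutdown -> Idle, mirroring
	// what the base balancer now does after NewSubConn.
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)) // IDLE

	// One SubConn starts connecting: the aggregate becomes CONNECTING.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// The attempt fails: with nothing Ready or Connecting left, the
	// aggregate drops to TRANSIENT_FAILURE instead of staying CONNECTING.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.TransientFailure)) // TRANSIENT_FAILURE
}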
} @@ -160,7 +163,9 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } @@ -180,10 +185,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -209,7 +218,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -218,6 +226,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34..4ecfa1c21 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. 
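The base/balancer.go hunk above replaces the plain map keyed by resolver.Address with the resolver.AddressMap helper, presumably because Address now carries attribute fields that a plain map-key comparison does not handle well. A rough sketch of the operations the balancer relies on (Set, Get, Keys, Len, Delete); the address and the stored value are placeholders for where the real balancer keeps its balancer.SubConn:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()
	addr := resolver.Address{Addr: "10.0.0.1:443"}

	// Values are stored as interface{}; the base balancer stores the SubConn here.
	m.Set(addr, "sc-1")

	if v, ok := m.Get(addr); ok {
		fmt.Println(v) // sc-1
	}
	fmt.Println(m.Len())       // 1
	fmt.Println(len(m.Keys())) // 1

	m.Delete(addr)
	fmt.Println(m.Len()) // 0
}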
func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a1537..274eb2f85 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 11e592aab..b1c23eaae 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,85 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - scBuffer *buffer.Unbounded - done *grpcsync.Event + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. 
} -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { + case u := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) + case *scStateUpdate: + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) + } + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + if ccb.closed.HasFired() { + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. 
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs + } + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -109,66 +202,143 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. 
Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). 
+ // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { return } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + acbw.UpdateAddresses(addrs) } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -197,7 +367,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.mu.Lock() defer acbw.mu.Unlock() if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) return } if !acbw.ac.tryUpdateAddrs(addrs) { @@ -212,23 +382,23 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) if acState == connectivity.Shutdown { return } - ac, err := cc.newAddrConn(addrs, opts) + newAC, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() if acState != connectivity.Idle { - ac.connect() + go newAC.connect() } } } @@ -236,7 +406,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect() + go acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index da0472967..ed75290cd 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,17 +19,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.25.0 -// protoc v3.3.0 +// protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -243,7 +243,7 @@ type GrpcLogEntry struct { unknownFields protoimpl.UnknownFields // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. @@ -308,7 +308,7 @@ func (*GrpcLogEntry) Descriptor() ([]byte, []int) { return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} } -func (x *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } @@ -439,7 +439,7 @@ type ClientHeader struct { // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *ClientHeader) Reset() { @@ -495,7 +495,7 @@ func (x *ClientHeader) GetAuthority() string { return "" } -func (x *ClientHeader) GetTimeout() *duration.Duration { +func (x *ClientHeader) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -1020,19 +1020,19 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ - (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType - (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger - (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type - (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry - (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader - (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader - (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer - (*Message)(nil), // 7: grpc.binarylog.v1.Message - (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata - (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry - (*Address)(nil), // 10: grpc.binarylog.v1.Address - (*timestamp.Timestamp)(nil), // 11: google.protobuf.Timestamp - (*duration.Duration)(nil), // 12: google.protobuf.Duration + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + 
(*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration } var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..a220c47c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index cbd671a85..de6d41c23 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" "math" - "net" + "net/url" "reflect" "strings" "sync" @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -48,6 +48,7 @@ import ( _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. ) const ( @@ -78,17 +79,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. 
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -104,6 +105,17 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). 
To make it a blocking @@ -131,6 +143,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * firstResolveEvent: grpcsync.NewEvent(), } cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { @@ -146,35 +159,35 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { for _, cd := range cc.dopts.copts.PerRPCCredentials { if cd.RequireTransportSecurity() { return nil, errTransportCredentialsMissing @@ -191,16 +204,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - } - if cc.dopts.withProxy { - cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) - } - } - if cc.dopts.copts.UserAgent != "" { cc.dopts.copts.UserAgent += " " + grpcUA } else { @@ -234,6 +237,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + 
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) scSet = true } default: @@ -244,37 +248,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target) - unixScheme := strings.HasPrefix(cc.target, "unix:") - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if unixScheme { - cc.authority = "localhost" - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. @@ -282,6 +264,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) } case <-ctx.Done(): return nil, ctx.Err() @@ -295,13 +278,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -315,6 +300,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { + cc.Connect() s := cc.GetState() if s == connectivity.Ready { break @@ -409,7 +395,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -475,32 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. 
type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. + + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -530,12 +520,24 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // // Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.balancerWrapper.exitIdle() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -547,6 +549,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revisit this decision in the future. 
cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -585,13 +588,13 @@ func init() { func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) } } @@ -612,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -622,24 +623,30 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { - cc.applyServiceConfigAndBalancer(sc, s.Addresses) + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -647,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. 
- for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -673,51 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. 
@@ -740,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -821,21 +797,35 @@ func (ac *addrConn) connect() error { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransport() return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -852,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -859,6 +853,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -872,6 +867,40 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. 
The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. @@ -884,17 +913,7 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } - if m, ok := cc.sc.Methods[method]; ok { - return m - } - i := strings.LastIndex(method, "/") - if m, ok := cc.sc.Methods[method[:i+1]]; ok { - return m - } - return cc.sc.Methods[""] + return getMethodConfig(cc.sc, method) } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -907,22 +926,21 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { if sc == nil { // should never reach here. return } cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ @@ -936,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []r cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. 
- cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1015,37 +1024,37 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. cc.blockingpicker.close() - - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1075,7 +1084,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1104,112 +1113,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. 
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. - b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. 
It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + return errConnClosing } ac.cc.mu.RLock() @@ -1224,9 +1207,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1235,86 +1218,118 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + // TODO: Delete prefaceReceived and move the logic to wait for it into the + // transport. + prefaceReceived := grpcsync.NewEvent() + connClosed := grpcsync.NewEvent() - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ac.ctx) + hcStarted := false // protected by ac.mu - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func() { ac.mu.Lock() - ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() + defer ac.mu.Unlock() + defer connClosed.Fire() + defer hcancel() + if !hcStarted || hctx.Err() != nil { + // We didn't start the health check or set the state to READY, so + // no need to do anything else here. + // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. 
In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. + return + } + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } } - onClose := func() { + onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) + ac.adjustParams(r) ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err } select { - case <-time.After(time.Until(connectDeadline)): + case <-connectCtx.Done(): // We didn't get the preface in time. - newTr.Close() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) + return err + } + return nil + case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. 
+ // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. + select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } } - return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1398,33 +1413,20 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - return nil, false + return nil } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1448,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. 
+ channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1569,3 +1570,114 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. 
+ // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 010156261..4a8992642 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. 
+ ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 02766443a..96ff1877e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -58,9 +58,9 @@ type PerRPCCredentials interface { type SecurityLevel int const ( - // Invalid indicates an invalid security level. + // InvalidSecurityLevel indicates an invalid security level. // The zero SecurityLevel value is invalid for backward compatibility. - Invalid SecurityLevel = iota + InvalidSecurityLevel SecurityLevel = iota // NoSecurity indicates a connection is insecure. NoSecurity // IntegrityOnly indicates a connection only provides integrity protection. @@ -92,7 +92,7 @@ type CommonAuthInfo struct { } // GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. -func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). 
TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // @@ -188,15 +207,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +227,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. -type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -229,17 +241,16 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. -func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { type internalInfo interface { - GetCommonAuthInfo() *CommonAuthInfo + GetCommonAuthInfo() CommonAuthInfo } - ri, _ := RequestInfoFromContext(ctx) - if ri.AuthInfo == nil { - return errors.New("unable to obtain SecurityLevel from context") + if ai == nil { + return errors.New("AuthInfo is nil") } - if ci, ok := ri.AuthInfo.(internalInfo); ok { + if ci, ok := ai.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. - if ci.GetCommonAuthInfo().SecurityLevel == Invalid { + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { @@ -250,15 +261,6 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go deleted file mode 100644 index ccbf35b33..000000000 --- a/vendor/google.golang.org/grpc/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. 
+func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 8ee7124f2..784822d05 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index a93fcab8f..f2f605a17 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,16 +20,15 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -45,20 +44,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -66,12 +62,7 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. 
defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder - withProxy bool + resolvers []resolver.Builder } // DialOption configures how we set up the connection. @@ -201,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -233,18 +205,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -282,7 +250,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. func WithBlock() DialOption { @@ -308,11 +276,17 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } @@ -325,7 +299,7 @@ func WithInsecure() DialOption { // later release. 
func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.withProxy = false + o.copts.UseProxy = false }) } @@ -487,8 +461,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -503,7 +476,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -524,14 +497,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -543,14 +518,8 @@ func WithDefaultServiceConfig(s string) DialOption { // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -590,14 +559,12 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, - withProxy: true, } } @@ -612,16 +579,6 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. 
-func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7..18e530fc9 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66b97a6f6..3009b35af 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -21,8 +21,7 @@ package proto import ( - "math" - "sync" + "fmt" "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" @@ -38,73 +37,22 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err + return proto.Unmarshal(data, vv) } func (codec) Name() string { return Name } - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/vendor/google.golang.org/grpc/go.mod 
b/vendor/google.golang.org/grpc/go.mod index d97276556..6a760ed74 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -1,17 +1,19 @@ module google.golang.org/grpc -go 1.11 +go 1.14 require ( - github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f - github.com/envoyproxy/go-control-plane v0.9.4 + github.com/cespare/xxhash/v2 v2.1.1 + github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 + github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.1 - github.com/google/go-cmp v0.5.0 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/net v0.0.0-20201021035429-f5854403a974 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 356a82e3d..5f418dba1 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,84 +1,141 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0 
h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 4ee33171e..7c1f66409 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,14 @@ package grpclog import ( + "encoding/json" + "fmt" "io" "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +98,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,19 +109,32 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. 
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -142,58 +159,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) 
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -210,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 2b6803ecc..a66024d23 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 -// protoc v3.3.0 +// protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 463c8734a..69f525d1b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/health/v1/health.proto package grpc_health_v1 @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // HealthClient is the client API for Health service. @@ -56,7 +61,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) if err != nil { return nil, err } @@ -130,8 +135,8 @@ type UnsafeHealthServer interface { mustEmbedUnimplementedHealthServer() } -func RegisterHealthServer(s *grpc.Server, srv HealthServer) { - s.RegisterService(&_Health_serviceDesc, srv) +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + s.RegisterService(&Health_ServiceDesc, srv) } func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -173,7 +178,10 @@ func (x *healthWatchServer) Send(m *HealthCheckResponse) error { return x.ServerStream.SendMsg(m) } -var _Health_serviceDesc = grpc.ServiceDesc{ +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ ServiceName: "grpc.health.v1.Health", HandlerType: (*HealthServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 15ff9facd..000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index d1a609e48..bb96ef57b 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -25,25 +25,41 @@ import ( // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. // -// Experimental +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. // -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. +// The returned error must be compatible with the status package. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. 
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. // -// Experimental +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. // -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on @@ -56,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..7ba8f4d18 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,382 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. 
+package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. 
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. 
+ bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..0a25ce43f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -49,17 +49,24 @@ func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binarg logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. 
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..ab589a76b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831..24df0a1a0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. 
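Illustrative sketch, not part of the vendored code: GetMethodLogger above resolves configuration with a fixed precedence, an exact method rule first, then the blacklist, then a per-service rule, then the global "*" default. The same lookup order with plain maps and a hypothetical limits type standing in for MethodLoggerConfig.

package main

import "fmt"

type limits struct{ header, message uint64 }

type loggerConfig struct {
	all       *limits
	services  map[string]*limits
	methods   map[string]*limits
	blacklist map[string]struct{}
}

// lookup mirrors the precedence in (*logger).GetMethodLogger: method rule,
// blacklist, service rule, then the global default; nil means "do not log".
func (c loggerConfig) lookup(service, method string) *limits {
	full := service + "/" + method
	if ml, ok := c.methods[full]; ok {
		return ml
	}
	if _, ok := c.blacklist[full]; ok {
		return nil
	}
	if ml, ok := c.services[service]; ok {
		return ml
	}
	return c.all
}

func main() {
	cfg := loggerConfig{
		all:       &limits{64, 64},
		services:  map[string]*limits{"pkg.Svc": {128, 128}},
		blacklist: map[string]struct{}{"pkg.Svc/Noisy": {}},
	}
	fmt.Println(cfg.lookup("pkg.Svc", "Get"))   // &{128 128}: service rule applies
	fmt.Println(cfg.lookup("pkg.Svc", "Noisy")) // <nil>: blacklisted
	fmt.Println(cfg.lookup("other.Svc", "Do"))  // &{64 64}: global default
}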
It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 7d7a3056b..c2fdd58b3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f73141393..777cbcd79 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
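Illustrative sketch, not part of the vendored code: the bufferedSink change above replaces a bare "for range ticker.C" loop with a select on a done channel, so Close can stop the flush goroutine instead of leaking it. The same shutdown pattern in isolation, with a stub flush callback.

package main

import (
	"fmt"
	"sync"
	"time"
)

type flusher struct {
	mu     sync.Mutex
	ticker *time.Ticker
	done   chan struct{}
}

func newFlusher(interval time.Duration, flush func()) *flusher {
	f := &flusher{ticker: time.NewTicker(interval), done: make(chan struct{})}
	go func() {
		for {
			select {
			case <-f.done: // Close was called; exit rather than leak the goroutine.
				return
			case <-f.ticker.C:
			}
			f.mu.Lock()
			flush()
			f.mu.Unlock()
		}
	}()
	return f
}

func (f *flusher) Close() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.ticker.Stop()
	close(f.done)
	// A final flush would happen here, mirroring bufferedSink.Close.
}

func main() {
	f := newFlusher(10*time.Millisecond, func() { fmt.Println("flush") })
	time.Sleep(35 * time.Millisecond)
	f.Close()
}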
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
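Illustrative sketch, not part of the vendored code: the cleanup function returned by NewChannelzStorageForTesting above swaps a fixed 1000-iteration loop for a context deadline plus a ticker. The same polling shape, generalized to any "wait until the map is empty" check with a hypothetical count callback.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitUntilEmpty polls count() every interval until it reports zero or the
// timeout expires, mirroring the shape of the cleanup func above.
func waitUntilEmpty(timeout, interval time.Duration, count func() int) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		n := count()
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("%d entries still present after %v", n, timeout)
		}
		if n == 0 {
			return nil
		}
		<-ticker.C
	}
}

func main() {
	remaining := 3
	err := waitUntilEmpty(time.Second, 10*time.Millisecond, func() int {
		remaining--
		return remaining
	})
	fmt.Println(err) // <nil>: the count reached zero well before the deadline
}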
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
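Illustrative sketch, not part of the vendored code: the Register* functions above now always mint an Identifier but only touch the channelz database when collection is turned on. The guard pattern, reduced to a toy registry with a hypothetical string payload.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type registry struct {
	enabled int32
	mu      sync.Mutex
	entries map[int64]string
	nextID  int64
}

// register always returns an id so callers can hold on to it, but mutates the
// map only when collection is enabled, mirroring RegisterChannel and friends
// returning an *Identifier even when channelz is off.
func (r *registry) register(ref string) int64 {
	id := atomic.AddInt64(&r.nextID, 1)
	if atomic.LoadInt32(&r.enabled) == 0 {
		return id
	}
	r.mu.Lock()
	r.entries[id] = ref
	r.mu.Unlock()
	return id
}

func main() {
	r := &registry{entries: make(map[int64]string)}
	fmt.Println(r.register("ignored"), len(r.entries)) // 1 0: id issued, nothing stored
	atomic.StoreInt32(&r.enabled, 1)
	fmt.Println(r.register("kept"), len(r.entries)) // 2 1: id issued and stored
}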
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns @@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. 
+// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8..8e13a3d2c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
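Illustrative sketch, not part of the vendored code: newIdentifer above precomputes the human-readable form as "<Type> #<id>", prefixed by the parent's string when one exists, which is what yields trace prefixes like "[Channel #1 SubChannel #2] ". A stand-alone version of that formatting with a local struct.

package main

import "fmt"

type ident struct {
	typ string
	id  int64
	str string
	pid *ident
}

// newIdent mirrors newIdentifer: build the display string once, prepending
// the parent's representation when a parent is supplied.
func newIdent(typ string, id int64, pid *ident) *ident {
	str := fmt.Sprintf("%s #%d", typ, id)
	if pid != nil {
		str = fmt.Sprintf("%s %s", pid.str, str)
	}
	return &ident{typ: typ, id: id, str: str, pid: pid}
}

func main() {
	ch := newIdent("Channel", 1, nil)
	sc := newIdent("SubChannel", 2, ch)
	fmt.Println("[" + sc.str + "] ") // "[Channel #1 SubChannel #2] ", as withParens would produce
}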
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154..ad0ce4dab 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd6181..1b1c4cce3 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521..8b06eed1a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55..8d194e44e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a0811..837ddc402 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..32c9b5903 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. 
+func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index be70b6cdf..25ade6230 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go deleted file mode 100644 index af6f57719..000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "crypto/tls" - "net/url" -) - -// SPIFFEIDFromState is a no-op for appengine builds. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index f499a614c..2919632d6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go index 55664fa46..f792fd22c 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94b..6f0272543 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,13 +26,10 @@ import ( const ( prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
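Illustrative sketch, not part of the vendored code: the new credentials.go above uses unexported empty struct types as context keys, so values stored by this package cannot collide with keys defined anywhere else. The pattern in isolation, with a hypothetical request-info value.

package main

import (
	"context"
	"fmt"
)

// requestInfoKey is an unexported, zero-size type; only this package can
// construct it, so no other package's context value can collide with it.
type requestInfoKey struct{}

type requestInfo struct{ Method string }

func newRequestInfoContext(ctx context.Context, ri requestInfo) context.Context {
	return context.WithValue(ctx, requestInfoKey{}, ri)
}

func requestInfoFromContext(ctx context.Context) (requestInfo, bool) {
	ri, ok := ctx.Value(requestInfoKey{}).(requestInfo)
	return ri, ok
}

func main() {
	ctx := newRequestInfoContext(context.Background(), requestInfo{Method: "/pkg.Svc/Get"})
	ri, ok := requestInfoFromContext(ctx)
	fmt.Println(ri.Method, ok) // "/pkg.Svc/Get true"
}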
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..7d996e51b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" + "strings" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" + + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. 
+ // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". + XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf..30a3b4258 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
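Illustrative sketch, not part of the vendored code: the xDS environment variables above follow two conventions. Features that default to on are disabled only by an explicit "false", while experimental features default to off and require an explicit "true". Both checks as small helpers, using hypothetical variable names for the demonstration.

package main

import (
	"fmt"
	"os"
	"strings"
)

// onUnlessFalse mirrors flags like XDSRingHash: enabled unless the variable
// is set, case-insensitively, to "false".
func onUnlessFalse(env string) bool {
	return !strings.EqualFold(os.Getenv(env), "false")
}

// offUnlessTrue mirrors flags like XDSFederation: disabled unless the
// variable is set, case-insensitively, to "true".
func offUnlessTrue(env string) bool {
	return strings.EqualFold(os.Getenv(env), "true")
}

func main() {
	os.Setenv("DEMO_DEFAULT_ON", "FALSE")
	os.Setenv("DEMO_DEFAULT_OFF", "true")
	fmt.Println(onUnlessFalse("DEMO_DEFAULT_ON"))  // false: explicitly disabled
	fmt.Println(offUnlessTrue("DEMO_DEFAULT_OFF")) // true: explicitly enabled
	fmt.Println(onUnlessFalse("DEMO_UNSET"))       // true  (default on)
	fmt.Println(offUnlessTrue("DEMO_UNSET"))       // false (default off)
}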
FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca..740f83c2b 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,37 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() } diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go similarity index 72% rename from vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go index a6144cd66..e2f948e8f 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2018 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,5 @@ * */ -package credentials - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 8783a8cf8..7a092b2b8 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,16 +16,16 @@ * */ -package dns +package grpcutil -import "net" +import "regexp" -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err +// FullMatchWithRegex returns whether the full text matches the regex provided. 
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 80b33cdaf..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func ParseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 716d92800..6d355b0b0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,17 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. - NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. 
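Illustrative sketch, not part of the vendored code: FullMatchWithRegex above forces leftmost-longest matching and then treats the match as "full" only when it consumes the whole input, which avoids rewriting caller-supplied patterns with ^...$ anchors. A stand-alone version of the same check with a few sample inputs.

package main

import (
	"fmt"
	"regexp"
)

// fullMatch mirrors grpcutil.FullMatchWithRegex: with Longest() in effect,
// the leftmost match must cover every byte of text to count as a full match.
// Empty text falls back to MatchString so patterns that accept the empty
// string still succeed.
func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()
	return len(re.FindString(text)) == len(text)
}

func main() {
	re := regexp.MustCompile("ab+")
	fmt.Println(fullMatch(re, "abbb")) // true: the match covers the whole string
	fmt.Println(fullMatch(re, "abbc")) // false: the match stops before "c"
	fmt.Println(fullMatch(re, "xab"))  // false: the match does not start at offset 0
}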
+ // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -57,6 +50,19 @@ var ( // bootstrap code while parsing certificate provider configs in the // bootstrap file. GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -79,3 +85,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..b2980f8ac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. 
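Illustrative sketch, not part of the vendored code: the Validate helper added in this new metadata package enforces HTTP/2-style constraints, keys limited to [0-9 a-z - _ .], values limited to printable ASCII unless the key carries the "-bin" suffix, and pseudo-headers (keys starting with ':') skipped entirely. A stand-alone version of those checks over a plain map.

package main

import (
	"fmt"
	"strings"
)

// validKey reports whether every byte of k is in [0-9a-z-_.], mirroring the
// per-byte key check in Validate.
func validKey(k string) bool {
	for i := 0; i < len(k); i++ {
		c := k[i]
		if !(c >= 'a' && c <= 'z') && !(c >= '0' && c <= '9') && c != '.' && c != '-' && c != '_' {
			return false
		}
	}
	return true
}

// validate applies the same rules as metadata.Validate: skip pseudo-headers,
// check key characters, and require printable ASCII values unless the key
// ends in "-bin".
func validate(md map[string][]string) error {
	for k, vals := range md {
		if len(k) > 0 && k[0] == ':' {
			continue
		}
		if !validKey(k) {
			return fmt.Errorf("header key %q contains illegal characters", k)
		}
		if strings.HasSuffix(k, "-bin") {
			continue
		}
		for _, v := range vals {
			for i := 0; i < len(v); i++ {
				if v[i] < 0x20 || v[i] > 0x7e {
					return fmt.Errorf("value of header key %q is not printable ASCII", k)
				}
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(validate(map[string][]string{"user-agent": {"demo/1.0"}})) // <nil>
	fmt.Println(validate(map[string][]string{"trace-bin": {"\x00\x01"}}))  // <nil>: -bin values are not checked
	fmt.Println(validate(map[string][]string{"Bad-Key": {"x"}}))           // error: uppercase is not allowed
}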
+func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. 
for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..c7a18a948 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. 
It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. 
+} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 304235566..75301c514 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,13 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) } @@ -143,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -201,28 +208,38 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. 
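// Illustrative aside (editorial sketch, not part of the upstream diff): assuming
// grpc-go's documented connection-backoff defaults (initial backoff 1s,
// multiplier 1.6, jitter 0.2, max backoff 120s), backoff.DefaultExponential.Backoff(backoffIndex)
// produces an exponentially growing wait on the order of seconds, capped at
// roughly two minutes, between consecutive failed lookups, in contrast to the
// fixed minDNSResRate wait used after a successful resolution.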
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } @@ -260,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -306,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..20852e59d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
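// Worked example (editorial sketch, not part of the upstream diff), showing why
// the code below falls back to URL.Opaque when URL.Path is empty; the targets
// used here are hypothetical:
//
//   u, _ := url.Parse("unix:///tmp/grpc.sock") // u.Path == "/tmp/grpc.sock", u.Opaque == ""
//   u, _ = url.Parse("unix:tmp/grpc.sock")     // u.Path == "",               u.Opaque == "tmp/grpc.sock"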
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // prepend "\x00" to address for unix-abstract + addr.Addr = "\x00" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index af3e2b5f7..badbdbf59 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -22,8 +22,10 @@ package serviceconfig import ( "encoding/json" "fmt" + "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" externalserviceconfig "google.golang.org/grpc/serviceconfig" ) @@ -44,6 +46,22 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + // UnmarshalJSON implements the json.Unmarshaler interface. // // ServiceConfig contains a list of loadBalancingConfigs, each with a name and @@ -60,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -74,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -102,5 +122,59 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. 
If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 710223b8d..e5c6513ed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. 
@@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 2fdcb76e6..b3a72276d 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -44,25 +42,23 @@ func GetCPUTime() int64 { } // Rusage is an alias for syscall.Rusage under linux environment. -type Rusage syscall.Rusage +type Rusage = syscall.Rusage // GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index adae60d65..999f52cd7 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -35,41 +36,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. 
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 40ef23923..244f4b081 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -20,13 +20,17 @@ package transport import ( "bytes" + "errors" "fmt" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +132,16 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool @@ -284,7 +298,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -298,10 +312,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -335,8 +349,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -377,9 +390,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. 
- ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -395,7 +408,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() return nil, ErrConnClosing } } @@ -420,6 +432,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } @@ -749,6 +769,32 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return nil } +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } + return nil +} + func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true @@ -787,6 +833,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. 
-func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e6..1c3459c2b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e73b77a15..24ca59084 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,8 @@ import ( "io" "math" "net" + "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -32,13 +34,14 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -59,7 +62,7 @@ type http2Client struct { cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. userAgent string - md interface{} + md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr @@ -114,6 +117,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -126,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -137,11 +143,34 @@ type http2Client struct { connectionID uint64 } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) if fn != nil { - return fn(ctx, addr) + // Special handling for unix scheme with custom dialer. 
Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -172,7 +201,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -216,16 +251,30 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. 
+ deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } isSecure = true if transportCreds.Info().SecurityProtocol == "tls" { scheme = "https" @@ -248,7 +297,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, - md: addr.Metadata, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -276,6 +324,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize @@ -297,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } t.statsHandler.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -312,12 +367,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err } var ss []http2.Setting @@ -335,14 +392,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err } // Adjust the connection flow control window if needed. 
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err } } @@ -359,11 +418,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -379,6 +437,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -418,7 +477,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -481,25 +540,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for _, vv := range added { for i, v := range vv { if i%2 == 0 { - k = v + k = strings.ToLower(v) continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields, nil @@ -532,7 +589,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -549,8 +606,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. 
if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { @@ -566,26 +626,34 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + AllowTransparentRetry bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -685,23 +753,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, err + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } if t.statsHandler != nil { @@ -796,6 +865,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. This should be called @@ -805,12 +877,12 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This method blocks until the addrConn that initiated this transport is // re-connected. This happens because t.onClose() begins reconnect logic at the // addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only Close once. if t.state == closing { t.mu.Unlock() - return nil + return } // Call t.onClose before setting the state to closing to prevent the client // from attempting to create new streams ASAP. @@ -826,13 +898,23 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -840,7 +922,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -859,7 +940,7 @@ func (t *http2Client) GracefulClose() { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(ErrConnClosing) return } t.controlBuf.put(&incomingGoAway{}) @@ -1000,7 +1081,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1105,9 +1186,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { } } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1125,7 +1206,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
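// Editorial note (not part of the upstream diff): during a graceful server
// drain, the first GOAWAY advertises LastStreamID = 2^31-1 and a later GOAWAY
// carries the actual last-processed stream ID, so across multiple GOAWAY
// frames the ID should only stay the same or decrease; an increase is
// therefore treated as a connection error in the check below.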
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: @@ -1155,7 +1236,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) } } @@ -1171,12 +1252,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1203,35 +1289,128 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) return } - isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) } - }() + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1242,9 +1421,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1253,13 +1432,36 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1273,7 +1475,8 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return return } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) @@ -1282,7 +1485,8 @@ func (t *http2Client) reader() { } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) return } t.onPrefaceReceipt() @@ -1318,7 +1522,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. - t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1352,7 +1556,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. 
@@ -1377,7 +1581,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0cf1cc320..45d7bd145 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,11 +21,11 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" "net" + "net/http" "strconv" "sync" "sync/atomic" @@ -35,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -51,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -72,7 +73,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -101,11 +101,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState @@ -117,16 +117,42 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. 
+// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -205,18 +231,24 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), @@ -248,12 +280,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -265,6 +297,14 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -294,6 +334,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } } t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() @@ -302,38 +343,145 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + // "Transports must consider requests containing the Connection header + // as malformed." 
- A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return false } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -346,33 +494,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) - } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - } - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) } } t.mu.Lock() @@ -392,16 +520,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } - if streamID%2 != 1 || streamID <= t.maxStreamID { + if httpMethod != http.MethodPost { t.mu.Unlock() - // illegal gRPC stream id. + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + logger.Infof("transport: %v", errMsg) } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) s.cancel() - return true + return false + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return false + } } - t.maxStreamID = streamID t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} @@ -423,7 +578,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -635,7 +790,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -779,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -794,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -910,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -936,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -990,12 +1143,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1049,11 +1202,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1061,10 +1214,10 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1073,15 +1226,10 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1103,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1122,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1138,17 +1296,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1156,20 +1310,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID if len(t.activeStreams) == 0 { g.closeConn = true } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } @@ -1182,6 +1339,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have @@ -1266,3 +1424,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. 
+func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 4d15afbf7..d8247bcdf 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -27,6 +27,7 @@ import ( "math" "net" "net/http" + "net/url" "strconv" "strings" "time" @@ -38,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -95,52 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -178,14 +132,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. 
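A minimal, illustrative Go sketch of the unexported-context-key pattern that setConnection/GetConnection above rely on; the helper names withConn and connFrom are hypothetical stand-ins, not identifiers from the patch.

package main

import (
	"context"
	"fmt"
	"net"
)

// connKey is an unexported struct type, so no other package can construct
// the same key and collide with or overwrite the stored value.
type connKey struct{}

// withConn stores the raw net.Conn in the context.
func withConn(ctx context.Context, c net.Conn) context.Context {
	return context.WithValue(ctx, connKey{}, c)
}

// connFrom retrieves the connection, returning nil if none was stored.
func connFrom(ctx context.Context) net.Conn {
	c, _ := ctx.Value(connKey{}).(net.Conn)
	return c
}

func main() {
	server, client := net.Pipe()
	defer server.Close()
	defer client.Close()

	ctx := withConn(context.Background(), server)
	if c := connFrom(ctx); c != nil {
		fmt.Println("conn found, network:", c.LocalAddr().Network()) // "pipe"
	}
}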
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -215,166 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 @@ -598,3 +394,31 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..c11b52782 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go similarity index 71% rename from vendor/google.golang.org/grpc/proxy.go rename to vendor/google.golang.org/grpc/internal/transport/proxy.go index f8f69bfb7..415961987 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -16,13 +16,12 @@ * */ -package grpc +package transport import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "net" @@ -34,13 +33,11 @@ import ( const proxyAuthHeaderKey = "Proxy-Authorization" var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -51,9 +48,6 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { if err != nil { return nil, err } - if url == nil { - return nil, errDisabled - } return url, nil } @@ -76,7 +70,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -115,32 +109,28 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return &bufConn{Conn: conn, r: r}, nil } -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. 
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { return } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index b74030a96..a9ce717f1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,9 +30,11 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -241,6 +243,7 @@ type Stream struct { ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string @@ -517,7 +520,8 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters @@ -526,17 +530,11 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. 
@@ -566,9 +564,11 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -609,6 +609,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport @@ -617,7 +619,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -651,8 +653,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -688,7 +691,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close() // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -739,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 000000000..e8b492774 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b947..8e0f6abe8 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -75,13 +75,9 @@ func Pairs(kv ...string) MD { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -97,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -111,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -120,9 +123,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -149,8 +160,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. 
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -163,20 +174,36 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + s := make([]string, len(v)) + copy(s, v) + out[key] = s + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // -// This is intended for gRPC-internal use ONLY. +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -186,21 +213,36 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) + s := make([]string, len(v)) + copy(s, v) + out[key] = s + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a58174b6f..843633c91 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,10 +144,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 56e33f6c7..fb7a99e0a 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,77 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. 
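A short, hedged example of the key-lowercasing behavior documented in the metadata changes above, using only the public google.golang.org/grpc/metadata API; the header name used here is arbitrary.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Pairs lowercases keys as it builds the MD map.
	md := metadata.Pairs("X-Request-ID", "abc123")

	// Get also lowercases its argument before the lookup.
	fmt.Println(md.Get("X-Request-ID")) // [abc123]

	// FromIncomingContext returns an MD whose keys are all lowercase,
	// even when the MD attached to the context was built by hand rather
	// than through the package's helper functions.
	ctx := metadata.NewIncomingContext(context.Background(), metadata.MD{"Mixed-Case": {"v"}})
	in, _ := metadata.FromIncomingContext(ctx)
	fmt.Println(in["mixed-case"]) // [v]
}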
Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.sc.UpdateAddresses(cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: 
subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -122,15 +152,32 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + type picker struct { result balancer.PickResult err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 6b8e0b77f..1f859f764 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 -// protoc v3.3.0 +// protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 47f2ea552..4e6a6b1a8 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // ServerReflectionClient is the client API for ServerReflection service. @@ -31,7 +36,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
if err != nil { return nil, err } @@ -85,8 +90,8 @@ type UnsafeServerReflectionServer interface { mustEmbedUnimplementedServerReflectionServer() } -func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { - s.RegisterService(&_ServerReflection_serviceDesc, srv) +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -115,7 +120,10 @@ func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRe return m, nil } -var _ServerReflection_serviceDesc = grpc.ServiceDesc{ +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ServiceName: "grpc.reflection.v1alpha.ServerReflection", HandlerType: (*ServerReflectionServer)(nil), Methods: []grpc.MethodDesc{}, diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index d2696168b..81344abd7 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,351 +37,192 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s *grpc.Server - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - -// Register registers the server reflection service on the given gRPC server. -func Register(s *grpc.Server) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) -} - -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). 
-type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider } -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} +var _ GRPCServer = (*grpc.Server)(nil) -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd +// Register registers the server reflection service on the given gRPC server. +func Register(s GRPCServer) { + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. 
+// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. -func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver } -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
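As a rough sketch of how these options are meant to be wired together (assuming a plain *grpc.Server and the default global registries), a caller can either keep the one-line Register or build the handler explicitly and register it through the generated v1alpha package:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	s := grpc.NewServer()

	// Explicit construction: equivalent to reflection.Register(s) when the
	// resolver fields are left nil and fall back to the global registries.
	svr := reflection.NewServer(reflection.ServerOptions{Services: s})
	rpb.RegisterServerReflectionServer(s, svr)
}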
+func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - return out, nil } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil -} - -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) -} - -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. -func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. 
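The protoreflect-based path that replaces the old gzip-compressed descriptor handling boils down to a registry lookup plus a protodesc conversion; a minimal standalone sketch, using the reflection proto itself as a stand-in file path:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"

	// Blank import so the generated code registers its descriptors globally.
	_ "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("reflection/grpc_reflection_v1alpha/reflection.proto")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	// Convert the rich descriptor back to the wire-format FileDescriptorProto
	// that the reflection protocol sends to clients.
	fdp := protodesc.ToFileDescriptorProto(fd)
	b, err := proto.Marshal(fdp)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Printf("%s: %d descriptor bytes, %d imports\n", fd.Path(), len(b), fd.Imports().Len())
}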
-func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. - if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. 
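Outside the reflection server, the same extension walk can be exercised directly against protoregistry.GlobalTypes; a sketch with a hypothetical extendee name (only extensions compiled into the binary are visible):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

func main() {
	// Hypothetical extendee; substitute a message that actually has
	// extensions linked into the binary.
	msg := protoreflect.FullName("google.protobuf.MessageOptions")

	var numbers []int32
	protoregistry.GlobalTypes.RangeExtensionsByMessage(msg, func(xt protoreflect.ExtensionType) bool {
		numbers = append(numbers, int32(xt.TypeDescriptor().Number()))
		return true // keep iterating
	})
	fmt.Println("known extension numbers:", numbers)
}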
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. @@ -402,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -463,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 3443ad975..978b89f37 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -40,29 +40,14 @@ echo "go install cmd/protoc-gen-go-grpc" echo "git clone https://github.com/grpc/grpc-proto" git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + # Pull in code.proto as a proto dependency mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the following repos to build the MeshCA config proto. -ENVOY_API_REPOS=( - "https://github.com/envoyproxy/data-plane-api" - "https://github.com/cncf/udpa" - "https://github.com/envoyproxy/protoc-gen-validate" -) -for repo in ${ENVOY_API_REPOS[@]}; do - dirname=$(basename ${repo}) - mkdir -p ${WORKDIR}/${dirname} - echo "git clone ${repo}" - git clone --quiet ${repo} ${WORKDIR}/${dirname} -done - -# Pull in the MeshCA service proto. -mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 -echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" -curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto - mkdir -p ${WORKDIR}/out # Generates sources without the embed requirement @@ -84,15 +69,28 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto - ${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto - ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3 +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -100,10 +98,7 @@ for src in ${SOURCES[@]}; do -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ - -I${WORKDIR}/data-plane-api \ - -I${WORKDIR}/udpa \ - -I${WORKDIR}/protoc-gen-validate \ - -I${WORKDIR}/istio \ + -I${WORKDIR}/protobuf/src \ ${src} done @@ -113,27 +108,24 @@ for src in ${LEGACY_SOURCES[@]}; do -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ - -I${WORKDIR}/data-plane-api \ - -I${WORKDIR}/udpa \ - -I${WORKDIR}/protoc-gen-validate \ - -I${WORKDIR}/istio \ + -I${WORKDIR}/protobuf/src \ ${src} done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config -# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ -mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..e87ecd0ee --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + entryList := a.m[addr.Addr] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addr.Addr] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. 
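Taken together, the new map behaves as sketched below (made-up addresses; equality considers Addr, ServerName and Attributes, but deliberately not BalancerAttributes):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-a"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-b"} // same Addr, distinct identity

	m.Set(a, "conn-a")
	m.Set(b, "conn-b")

	if v, ok := m.Get(a); ok {
		fmt.Println("a ->", v) // conn-a; b stays a separate entry despite the shared Addr
	}
	fmt.Println("len =", m.Len(), "keys =", len(m.Keys()))

	m.Delete(b)
	fmt.Println("after delete, len =", m.Len())
}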
+func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e9fa8e33d..ca2e35a35 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -23,9 +23,11 @@ package resolver import ( "context" "net" + "net/url" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -116,9 +118,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attribes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +138,20 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -181,7 +202,7 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. @@ -204,25 +225,36 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. 
// -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index f2d81968f..05a9d4e0b 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,15 +19,14 @@ package grpc import ( - "fmt" "strings" "sync" - "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -41,8 +40,7 @@ type ccResolverWrapper struct { done *grpcsync.Event curState resolver.State - pollingMu sync.Mutex - polling chan struct{} + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -93,89 +91,51 @@ func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Unlock() } -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. -func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling - return - } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. - t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. 
- } - } - }() -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) + return nil } + ccr.addChannelzTraceEvent(s) ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.cc.updateResolverState(resolver.State{}, err) } // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
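Since UpdateState now returns balancer.ErrBadResolverState to the resolver instead of the wrapper polling on its behalf, a resolver implementation decides for itself when to re-resolve; a small sketch (the helper name is invented for illustration):

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// pushAddresses sends a fresh address list to gRPC and reports whether the
// resolver should schedule an earlier re-resolution because the balancer
// rejected the update.
func pushAddresses(cc resolver.ClientConn, addrs []resolver.Address) bool {
	err := cc.UpdateState(resolver.State{Addresses: addrs})
	if err == balancer.ErrBadResolverState {
		// The ccResolverWrapper no longer backs off and retries for us.
		return true
	}
	return false
}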
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -183,14 +143,11 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { scpr := parseServiceConfig(sc) if scpr.Err != nil { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { @@ -215,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index f0609f2a4..5d407b004 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -27,7 +27,6 @@ import ( "io" "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -259,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -430,9 +430,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. 
// // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -711,13 +712,11 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } } return d, nil } @@ -828,33 +827,45 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } @@ -872,40 +883,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - // channelzData is used to store channelz related data for ClientConn, addrConn and Server. 
// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. @@ -923,8 +900,7 @@ type channelzData struct { // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 7. // -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. +// Older versions are kept for compatibility. // // These constants should not be referenced from any other code. const ( diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 968eb598e..65de84b30 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -40,6 +40,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" @@ -56,8 +57,24 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } +} + var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") @@ -100,9 +117,12 @@ type serverWorkerData struct { type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop @@ -114,7 +134,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -259,6 +279,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. 
+// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. By @@ -369,6 +418,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -512,7 +566,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[string]map[transport.ServerTransport]bool), services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), @@ -530,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -656,16 +709,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
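For the ForceServerCodec option added above, a compact sketch of forcing a wrapper codec onto every RPC (the loggingCodec type is hypothetical; the base codec comes from encoding.GetCodec):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the default "proto" codec
)

// loggingCodec wraps a registered codec; when forced via ForceServerCodec it
// is used for every request and response, bypassing content-subtype lookup.
type loggingCodec struct {
	encoding.Codec
}

func (c loggingCodec) Marshal(v interface{}) ([]byte, error) {
	b, err := c.Codec.Marshal(v)
	log.Printf("marshalled %T into %d bytes", v, len(b))
	return b, err
}

func main() {
	base := encoding.GetCodec("proto")
	s := grpc.NewServer(grpc.ForceServerCodec(loggingCodec{Codec: base}))
	_ = s // register services and call Serve as usual
}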
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -677,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -712,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -726,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -771,7 +819,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -779,49 +827,45 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, StatsHandler: s.opts.statsHandler, KeepaliveParams: s.opts.keepaliveParams, @@ -834,13 +878,20 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -917,10 +968,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -948,7 +999,7 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { @@ -960,15 +1011,28 @@ func (s *Server) addConn(st transport.ServerTransport) bool { // immediately. st.Drain() } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. 
+ delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -1033,22 +1097,29 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) + } + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) + } + return state.next(ctx, req) } } @@ -1062,7 +1133,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1211,9 +1284,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1314,22 +1388,29 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) + } + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) + } + return state.next(srv, ss) } } @@ -1342,7 +1423,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1445,6 +1528,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1466,7 +1551,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
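The recursive getChainUnaryHandler/getChainStreamHandler helpers above are replaced by a single iterative closure per chain. Registration on the caller's side is unchanged; a hedged sketch with two illustrative unary interceptors:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logging and timing are example interceptors; they run in registration
// order, each calling handler (the next link in the chain) exactly once.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("call %s", info.FullMethod)
	return handler(ctx, req)
}

func timing(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v", info.FullMethod, time.Since(start))
	return resp, err
}

func newServer() *grpc.Server {
	// ChainUnaryInterceptor invokes logging first, then timing, then the
	// method handler; the combined closure is built by chainUnaryInterceptors.
	return grpc.NewServer(grpc.ChainUnaryInterceptor(logging, timing))
}

func main() { _ = newServer() }
```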
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1512,7 +1599,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -1623,16 +1710,12 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1641,8 +1724,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close() + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1663,11 +1748,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1679,8 +1760,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } } s.drain = true } @@ -1718,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1735,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. 
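Stop now closes every transport under every listener address, while GracefulStop drains them first. A common shutdown pattern built on these two public methods (the 10-second budget is an arbitrary choice for this sketch):

```go
package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	go func() { _ = srv.Serve(lis) }()

	// On SIGINT, drain in-flight streams; if draining takes too long,
	// fall back to a hard Stop.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig

	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // waits for in-flight RPCs on every listener
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		srv.Stop() // closes all remaining transports immediately
	}
}
```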
-// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1750,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 5e434ca7f..b01c548bb 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -41,29 +41,7 @@ const maxInt = int(^uint(0) >> 1) // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. 
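The expanded SetHeader/SendHeader/SetTrailer comments above describe when header and trailer metadata actually hit the wire. A short sketch of the call pattern from inside a unary handler (the surrounding service and message types are assumed, not shown):

```go
package handlers

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate stands in for the body of a unary handler.
func annotate(ctx context.Context) error {
	// Headers set here are merged and flushed when the first response (or
	// the RPC status) goes out; calling after that point returns an error.
	if err := grpc.SetHeader(ctx, metadata.Pairs("cache", "hit")); err != nil {
		return err
	}
	// SendHeader forces the merged headers onto the wire now; it may be
	// called at most once.
	if err := grpc.SendHeader(ctx, metadata.Pairs("region", "eu-west-1")); err != nil {
		return err
	}
	// Trailers are buffered and sent together with the final RPC status.
	return grpc.SetTrailer(ctx, metadata.Pairs("units-consumed", "3"))
}
```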
- retryPolicy *retryPolicy -} +type MethodConfig = internalserviceconfig.MethodConfig type lbConfig struct { name string @@ -127,34 +105,6 @@ type healthCheckConfig struct { ServiceName string } -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoff). In general, the nth attempt will occur at - // random(0, - // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - type jsonRetryPolicy struct { MaxAttempts int InitialBackoff string @@ -268,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -313,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: d, } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -359,7 +309,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -381,19 +331,19 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { return nil, nil } - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.maxAttempts > 5 { + if rp.MaxAttempts > 5 { // TODO(retry): Make the max maxAttempts configurable. - rp.maxAttempts = 5 + rp.MaxAttempts = 5 } for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true + rp.RetryableStatusCodes[code] = true } return rp, nil } @@ -431,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. 
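The retry policy fields now live in internalserviceconfig.RetryPolicy, but they are still populated from the same service-config JSON. A hedged sketch of supplying such a config on the client; the service name "example.Echo" is a placeholder:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// retryServiceConfig mirrors the JSON shape parsed above. Note that the
// parser caps maxAttempts at 5 regardless of the value supplied.
const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "example.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(retryServiceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```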
func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7..0285dcc6a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 01e182c30..6d163b6e3 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,9 +74,16 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true @@ -110,18 +118,18 @@ func Code(err error) codes.Code { return codes.Unknown } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. 
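The new Begin fields let a stats.Handler distinguish streaming directions and transparent retry attempts. An illustrative handler that logs only those fields (all other callbacks are no-ops):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// beginLogger is an example stats.Handler; only Begin events are inspected.
type beginLogger struct{}

func (beginLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (beginLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (beginLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (beginLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if b, ok := s.(*stats.Begin); ok {
		log.Printf("attempt start: clientStream=%t serverStream=%t transparentRetry=%t",
			b.IsClientStream, b.IsServerStream, b.IsTransparentRetryAttempt)
	}
}

func main() {
	// The handler can be attached on either side; here it is a dial option.
	_, _ = grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(beginLogger{}),
	)
}
```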
func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 5fd856a38..236fc17ec 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,9 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -44,20 +47,28 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. @@ -156,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -164,13 +180,48 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. 
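FromContextError now unwraps with errors.Is, so context errors wrapped by application code map to the proper codes instead of Unknown. A small sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A wrapped deadline error is recognised via errors.Is and mapped to
	// codes.DeadlineExceeded.
	wrapped := fmt.Errorf("fetching profile: %w", context.DeadlineExceeded)
	st := status.FromContextError(wrapped)
	fmt.Println(st.Code() == codes.DeadlineExceeded) // true

	// Anything that is not a context error still maps to Unknown.
	fmt.Println(status.FromContextError(errors.New("boom")).Code()) // Unknown
}
```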
if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } - mc := cc.GetMethodConfig(method) + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -207,6 +258,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -230,33 +282,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -270,24 +295,36 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, + onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } cs.binlog = binarylog.GetMethodLogger(method) - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -326,63 +363,104 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) } - ctx := cs.ctx if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. return err } - return toRPCErr(err) + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -400,8 +478,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -409,7 +486,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -432,7 +509,8 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? + committed bool // active attempt committed for retry? + onCommit func() buffer []func(a *csAttempt) error // operations to replay on retry bufferSize int // current size of buffer } @@ -440,6 +518,7 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { + ctx context.Context cs *clientStream t transport.ClientTransport s *transport.Stream @@ -458,9 +537,18 @@ type csAttempt struct { trInfo *traceInfo statsHandler stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } cs.committed = true cs.buffer = nil } @@ -472,85 +560,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. 
-func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false - if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true - } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { - return err + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. 
if cs.retryThrottler.throttle() { - return err + return false, err } - if cs.numRetries+1 >= rp.maxAttempts { - return err + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err } var dur time.Duration @@ -558,9 +637,9 @@ func (cs *clientStream) shouldRetry(err error) error { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { cur = max } dur = time.Duration(grpcrand.Int63n(int64(cur))) @@ -573,26 +652,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -602,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -610,7 +698,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. 
+ return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() @@ -628,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -659,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -677,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -728,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { @@ -875,7 +961,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return io.EOF } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -923,7 +1009,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { a.mu.Unlock() } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -985,12 +1071,12 @@ func (a *csAttempt) finish(err error) { if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1295,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. 
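On the client side, Header and Trailer keep their public shape after the retry rework: Header blocks until the server's headers arrive, and Trailer is only complete once the final status has been received. A sketch against the grpc.ClientStream interface (the stream itself is assumed to come from generated code):

```go
package client

import (
	"log"

	"google.golang.org/grpc"
)

// inspectMetadata shows the client-side view of stream metadata.
func inspectMetadata(stream grpc.ClientStream) {
	hdr, err := stream.Header()
	if err != nil {
		log.Printf("header error: %v", err)
		return
	}
	log.Printf("headers: %v", hdr)

	// ... SendMsg/RecvMsg until the stream ends ...

	// Trailer is only meaningful after RecvMsg has returned a non-nil error
	// (io.EOF or a status error).
	log.Printf("trailers: %v", stream.Trailer())
}
```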
@@ -1359,7 +1447,7 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1379,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1398,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index caea1ebed..dbf34e6bb 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -37,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 144b0bf9e..5bc03f9b3 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
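With the tap change above, a status error returned from ServerInHandle is now propagated to the client rather than being collapsed into REFUSED_STREAM. A hedged rate-limiting sketch; the limiter values and the golang.org/x/time/rate dependency are illustrative choices, not part of this patch:

```go
package main

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// limiter rejects streams before they are created; the status returned from
// the tap handler is what the client now sees.
var limiter = rate.NewLimiter(100, 100) // 100 streams/sec, illustrative numbers

func rateLimit(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() {
		return nil, status.Errorf(codes.ResourceExhausted, "rejected %q: rate limit", info.FullMethodName)
	}
	return ctx, nil
}

func main() {
	_ = grpc.NewServer(grpc.InTapHandle(rateLimit))
}
```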
-const Version = "1.33.2" +const Version = "1.47.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 48652f604..ceb436c6c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -28,32 +28,21 @@ cleanup() { } trap cleanup EXIT -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/travis wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -61,7 +50,7 @@ if [[ "$1" = "-install" ]]; then bin/protoc --version popd elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -100,16 +89,6 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "\.pb\.go:" -go vet -all ./... - misspell -error . # - Check that generated proto files are up to date. @@ -119,12 +98,22 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + + go mod tidy git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # @@ -141,8 +130,11 @@ not grep -Fv '.CredsBundle .NewAddress .NewServiceConfig .Type is deprecated: use Attributes +BuildVersion is deprecated balancer.ErrTransientFailure balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated grpc.CallCustomCodec grpc.Code grpc.Compressor @@ -164,13 +156,7 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion -resolver.Backend -resolver.GRPCLB -extDesc.Filename is deprecated -BuildVersion is deprecated -github.com/golang/protobuf/jsonpb is deprecated proto is deprecated -xxx_messageInfo_ proto.InternalMessageInfo is deprecated proto.EnumName is deprecated proto.ErrInternalBadWireType is deprecated @@ -184,7 +170,12 @@ proto.RegisterExtension is deprecated proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated' "${SC_OUT}" +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" # - special golint on package comments. lint_package_comment_per_package() { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 000000000..07da5db34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,665 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. 
+// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd pref.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. 
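Seen from the caller, the decoder above is reached through protojson.Unmarshal or UnmarshalOptions.Unmarshal. A small sketch; structpb.Struct is used here only because it accepts arbitrary JSON objects:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Unmarshal resets the target message first, then walks the JSON object
	// member by member, as in unmarshalMessage above.
	var s structpb.Struct
	data := []byte(`{"name": "grpc", "replicas": 3, "beta": null}`)

	opts := protojson.UnmarshalOptions{
		// DiscardUnknown matters for ordinary generated messages, where an
		// unknown member is otherwise an error; Struct accepts any member,
		// so it is shown here only to illustrate the option.
		DiscardUnknown: true,
	}
	if err := opts.Unmarshal(data, &s); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println(s.Fields["replicas"].GetNumberValue()) // 3
}
```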
+ extName := pref.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd pref.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd pref.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if tok.Kind() == json.Bool { + return pref.ValueOfBool(tok.Bool()), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case pref.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case pref.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case pref.StringKind: + if tok.Kind() == json.String { + return pref.ValueOfString(tok.ParsedString()), nil + } + + case pref.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case pref.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getInt(tok, bitSize) + } + return pref.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfInt32(int32(n)), true + } + return pref.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getUint(tok, bitSize) + } + return pref.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfUint32(uint32(n)), true + } + return pref.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.NaN())), true + } + return pref.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(+1))), true + } + return pref.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(-1))), true + } + return pref.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getFloat(tok, bitSize) + } + return pref.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfFloat32(float32(n)), true + } + return pref.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (pref.Value, bool) { + if tok.Kind() != json.String { + return pref.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return pref.Value{}, false + } + return pref.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return pref.ValueOfEnum(pref.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return pref.ValueOfEnum(0), true + } + } + + return pref.Value{}, false +} + +func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return pref.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
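As context for the scalar helpers above (unmarshalInt, unmarshalUint, unmarshalFloat): the proto3 JSON mapping lets 64-bit numeric fields arrive either as JSON numbers or as quoted strings, and the string branch simply re-decodes the quoted form. A minimal sketch of that behavior through the public API, illustrative only and not part of the vendored change; the Int64Value wrapper is used merely because it needs no user-defined .proto:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        // 64-bit integers may be sent as quoted strings; the decoder falls
        // back to parsing the string with the same number grammar.
        var v wrapperspb.Int64Value
        if err := protojson.Unmarshal([]byte(`"9007199254740993"`), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.Value) // 9007199254740993

        // The unquoted number form is accepted as well.
        if err := protojson.Unmarshal([]byte(`123`), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.Value) // 123
    }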
+func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case pref.StringKind: + return pref.ValueOfString(name).MapKey(), nil + + case pref.BoolKind: + switch name { + case "true": + return pref.ValueOfBool(true).MapKey(), nil + case "false": + return pref.ValueOfBool(false).MapKey(), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return pref.ValueOfInt32(int32(n)).MapKey(), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return pref.ValueOfInt64(int64(n)).MapKey(), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return pref.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return pref.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 000000000..00ea2fecf --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 000000000..ba971f078 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,344 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. 
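For orientation, a minimal usage sketch of the package-level entry points documented above (Marshal and the error-ignoring Format), illustrative only and not part of the vendored change; the Timestamp value is arbitrary:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        ts := timestamppb.New(time.Date(2022, 6, 20, 12, 3, 46, 0, time.UTC))

        // Marshal returns the compact proto3 JSON form; for a Timestamp that
        // is an RFC 3339 string.
        b, err := protojson.Marshal(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "2022-06-20T12:03:46Z"

        // Format is the human-oriented variant that ignores errors.
        fmt.Println(protojson.Format(ts))
    }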
+func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
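The MarshalOptions knobs above can be exercised directly; a small sketch, illustrative only and not part of the vendored change, using descriptorpb.FieldDescriptorProto simply because it is a generated message that ships with the module:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        fd := &descriptorpb.FieldDescriptorProto{
            Name:     proto.String("display_name"),
            JsonName: proto.String("displayName"),
        }

        // Default naming uses lowerCamelCase JSON names, e.g. "jsonName".
        b, err := protojson.MarshalOptions{Multiline: true}.Marshal(fd)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))

        // UseProtoNames switches the keys back to the .proto field names,
        // e.g. "json_name".
        opts := protojson.MarshalOptions{Multiline: true, UseProtoNames: true}
        b, err = opts.Marshal(fd)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }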
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return []byte("{}"), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. +type unpopulatedFieldRanger struct{ pref.Message } + +func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = pref.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m pref.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. 
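unpopulatedFieldRanger above is what backs the EmitUnpopulated option; a short sketch of the observable effect, illustrative only and not part of the vendored change (descriptorpb is again used only as a convenient proto2 message):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        fd := &descriptorpb.FieldDescriptorProto{Name: proto.String("x")}

        // With EmitUnpopulated, unset scalar and message fields show up as
        // null, unset lists as [] and unset maps as {} (see the table above).
        opts := protojson.MarshalOptions{Multiline: true, EmitUnpopulated: true}
        b, err := opts.Marshal(fd)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }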
+func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Fixed32Kind: + e.WriteUint(val.Uint()) + + case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, + pref.Sfixed64Kind, pref.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case pref.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case pref.MessageKind, pref.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 000000000..72924a905 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,889 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, pref.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, pref.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. 
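The comment above describes the Any JSON form; the marshalAny implementation follows just below. A minimal sketch of producing that form through the public API, illustrative only and not part of the vendored change:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Pack a well-known type into an Any.
        a, err := anypb.New(durationpb.New(3 * time.Second))
        if err != nil {
            panic(err)
        }

        b, err := protojson.Marshal(a)
        if err != nil {
            panic(err)
        }
        // Because Duration has a custom JSON form, the payload is nested
        // under "value" next to "@type":
        // {"@type":"type.googleapis.com/google.protobuf.Duration","value":"3s"}
        fmt.Println(string(b))
    }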
+ +func (e encoder) marshalAny(m pref.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m pref.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. 
+ if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. + Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, pref.ValueOfString(typeURL)) + m.Set(fdValue, pref.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. 
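unmarshalAnyValue, defined just below, rejects unknown keys in the Any wrapper object unless UnmarshalOptions.DiscardUnknown is set, mirroring the general unknown-field handling in unmarshalMessage. A small sketch, illustrative only and not part of the vendored change; the Any wrapper is used because it can carry an unknown key without a user-defined message:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/anypb"
    )

    func main() {
        in := []byte(`{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1s","note":"x"}`)

        var a anypb.Any
        // Strict mode: the extra "note" key is reported as an unknown field.
        if err := protojson.Unmarshal(in, &a); err != nil {
            fmt.Println("strict:", err)
        }

        // DiscardUnknown skips it instead.
        opts := protojson.UnmarshalOptions{DiscardUnknown: true}
        if err := opts.Unmarshal(in, &a); err != nil {
            panic(err)
        }
        fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration
    }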
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(pref.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
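The Struct, ListValue and Value handling here maps directly onto plain JSON objects, arrays and primitives; the structpb helpers build such messages from ordinary Go values. A short sketch, illustrative only and not part of the vendored change (field names and values are arbitrary):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        // structpb.NewStruct builds a google.protobuf.Struct from Go values.
        s, err := structpb.NewStruct(map[string]interface{}{
            "name":    "example",
            "count":   2,
            "enabled": true,
            "tags":    []interface{}{"a", "b"},
            "missing": nil,
        })
        if err != nil {
            panic(err)
        }

        b, err := protojson.Marshal(s)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // a plain JSON object

        // Round-trip back into a Struct; note the caveat above: a JSON string
        // such as "Infinity" always lands in string_value, never number_value.
        var back structpb.Struct
        if err := protojson.Unmarshal(b, &back); err != nil {
            panic(err)
        }
        fmt.Println(back.Fields["count"].GetNumberValue()) // 2
    }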
+ +func (e encoder) marshalKnownValue(m pref.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m pref.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd pref.FieldDescriptor + var val pref.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = pref.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = pref.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = pref.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
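A quick round trip through the Duration rules above, as implemented by marshalDuration and parseDuration just below; illustrative only and not part of the vendored change:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        var d durationpb.Duration

        // Fractional seconds become the nanos field; a leading '-' applies
        // to both seconds and nanos.
        if err := protojson.Unmarshal([]byte(`"-1.5s"`), &d); err != nil {
            panic(err)
        }
        fmt.Println(d.Seconds, d.Nanos) // -1 -500000000

        // Marshaling trims trailing zeros down to 0, 3, 6 or 9 fractional digits.
        b, err := protojson.Marshal(durationpb.New(1500 * time.Millisecond))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "1.500s"
    }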
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' { + return 0, 0, false + } + // Read the fractional part. 
+ b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because time.Parse would have + // covered that already. 
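The Timestamp decoder accepts any RFC 3339 offset while the encoder always Z-normalizes, per the comment above. A small sketch, illustrative only and not part of the vendored change:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        var ts timestamppb.Timestamp

        // The decoder accepts an RFC 3339 string with a non-UTC offset ...
        if err := protojson.Unmarshal([]byte(`"2022-06-20T14:03:46+02:00"`), &ts); err != nil {
            panic(err)
        }

        // ... while the encoder always emits the Z-normalized (UTC) form.
        b, err := protojson.Marshal(&ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "2022-06-20T12:03:46Z"
    }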
+ secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !pref.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(pref.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index cab95a427..179d6e8fc 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "strings" "unicode/utf8" "google.golang.org/protobuf/internal/encoding/messageset" @@ -23,6 +22,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -51,8 +51,9 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using options in -// UnmarshalOptions object. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). 
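For orientation on the prototext changes in this patch (field lookup via ByTextName and extension lookup through the resolver), a minimal sketch of the public prototext API, illustrative only and not part of the vendored change; note that prototext output is deliberately not guaranteed to be stable across versions:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Text format uses the .proto field names.
        var d durationpb.Duration
        if err := prototext.Unmarshal([]byte(`seconds: 3 nanos: 500000000`), &d); err != nil {
            panic(err)
        }
        fmt.Println(d.Seconds, d.Nanos) // 3 500000000

        // Marshal output may vary slightly (e.g. whitespace) between runs
        // and releases, so treat it as human-readable, not canonical.
        out, err := prototext.Marshal(&d)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // e.g. seconds:3 nanos:500000000
    }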
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch tok.NameKind() { case text.IdentName: name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByName(name) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textproto field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true @@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return nil } -// findExtension returns protoreflect.ExtensionType from the Resolver if found. -func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { @@ -762,9 +744,6 @@ func (d decoder) skipValue() error { // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. - if err := d.skipValue(); err != nil { - return err - } } } } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 0877d71c5..8d5304dc5 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "sort" "strconv" "unicode/utf8" @@ -16,10 +15,11 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -169,35 +169,15 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { // If unable to expand, continue on to marshal Any as a regular message. } - // Marshal known fields. - fieldDescs := messageDesc.Fields() - size := fieldDescs.Len() - for i := 0; i < size; { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - - if fd == nil || !m.Has(fd) { - continue - } - - name := fd.Name() - // Use type name for group field name. 
- if fd.Kind() == pref.GroupKind { - name = fd.Message().Name() - } - val := m.Get(fd) - if err := e.marshalField(string(name), val, fd); err != nil { - return err + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false } - } - - // Marshal extensions. - if err := e.marshalExtensions(m); err != nil { + return true + }) + if err != nil { return err } @@ -290,7 +270,7 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto // marshalMap marshals the given protoreflect.Map as multiple name-value fields. func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { var err error - mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) return err } -// marshalExtensions marshals extension fields. -func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true - } - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) - return true - }) - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. - for _, entry := range entries { - // Extension field name is the proto field name enclosed in []. - name := "[" + entry.key + "]" - if err := e.marshalField(name, entry.value, entry.desc); err != nil { - return err - } - } - return nil -} - // marshalUnknown parses the given []byte and marshals fields out. // This function assumes proper encoding in the given []byte. func (e encoder) marshalUnknown(b []byte) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..9c61112f5 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. 
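The hunk just below threads a recursion limit through ConsumeFieldValue to bound deeply nested groups. For context, a minimal sketch of the public protowire append/consume API, illustrative only and not part of the vendored change:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        // Encode field number 1 as a varint with value 150.
        b := protowire.AppendTag(nil, 1, protowire.VarintType)
        b = protowire.AppendVarint(b, 150)

        // Decode it again: first the tag, then the value for that wire type.
        num, typ, n := protowire.ConsumeTag(b)
        if n < 0 {
            panic(protowire.ParseError(n))
        }
        if m := protowire.ConsumeFieldValue(num, typ, b[n:]); m < 0 {
            panic(protowire.ParseError(m))
        }
        v, _ := protowire.ConsumeVarint(b[n:])
        fmt.Println(num, typ, v) // 1 0 150
    }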
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index e7af0fe0d..360c63329 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { name = "FileImports" case pref.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() } start, end = name+"{", "}" } diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go index a904dd1f9..49c8676d4 100644 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -26,6 +26,14 @@ func Bool() bool { return randSeed%2 == 1 } +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + // randSeed is a best-effort at an approximate hash of the Go binary. var randSeed = binaryHash() diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 000000000..b13fd29e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. 
+ in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
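// --- Illustrative sketch, not part of the vendored diff ---
// A small standalone version of the line/column arithmetic used by Decoder.Position
// above: the line is one plus the number of newlines before the offset, and the column
// is one plus the rune count since the last newline (multi-rune characters are not
// special-cased). The helper name lineCol is an assumption for this sketch.
package main

import (
    "bytes"
    "fmt"
    "unicode/utf8"
)

func lineCol(src []byte, offset int) (line, col int) {
    b := src[:offset]
    line = bytes.Count(b, []byte("\n")) + 1
    if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
        b = b[i+1:]
    }
    return line, utf8.RuneCount(b) + 1
}

func main() {
    src := []byte("{\n  \"a\": tru\n}")
    fmt.Println(lineCol(src, 9)) // 2 8: position of the bad literal on line 2
}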
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-lenght of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 000000000..2999d7133 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
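// --- Illustrative sketch, not part of the vendored diff ---
// An equivalent standalone check for the RFC 7159 number grammar that parseNumber
// above scans by hand: an optional minus, an integer part with no leading zeros, an
// optional fraction, and an optional exponent. parseNumber additionally requires a
// trailing delimiter; this regexp only validates a complete string.
package main

import (
    "fmt"
    "regexp"
)

var jsonNumber = regexp.MustCompile(`^-?(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?$`)

func main() {
    for _, s := range []string{"0", "-12.5e+3", "01", ".5", "1."} {
        fmt.Printf("%-9s %v\n", s, jsonNumber.MatchString(s))
    }
    // 0 true, -12.5e+3 true, 01 false, .5 false, 1. false
}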
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 000000000..f7fea7d8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
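// --- Illustrative sketch, not part of the vendored diff ---
// A standalone version of the \uXXXX handling in parseString above: each escape is a
// 16-bit code unit, and a high surrogate must be combined with the following low
// surrogate via utf16.DecodeRune before it becomes a usable rune. The helper name
// decodeUnicodeEscape is an assumption for this sketch.
package main

import (
    "fmt"
    "strconv"
    "unicode/utf16"
)

// decodeUnicodeEscape decodes one logical character starting at a "\uXXXX" escape
// (possibly a surrogate pair) and reports how many input bytes it consumed.
func decodeUnicodeEscape(in string) (rune, int, bool) {
    if len(in) < 6 || in[0] != '\\' || in[1] != 'u' {
        return 0, 0, false
    }
    v, err := strconv.ParseUint(in[2:6], 16, 16)
    if err != nil {
        return 0, 0, false
    }
    r := rune(v)
    if !utf16.IsSurrogate(r) {
        return r, 6, true
    }
    if len(in) < 12 || in[6] != '\\' || in[7] != 'u' {
        return 0, 0, false
    }
    v2, err := strconv.ParseUint(in[8:12], 16, 16)
    if err != nil {
        return 0, 0, false
    }
    r = utf16.DecodeRune(r, rune(v2))
    if r == '\uFFFD' { // unpaired or invalid surrogate
        return 0, 0, false
    }
    return r, 12, true
}

func main() {
    r, n, ok := decodeUnicodeEscape(`\ud83d\ude00`)
    fmt.Printf("%c %d %v\n", r, n, ok) // 😀 12 true
}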
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 000000000..50578d659 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
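// --- Illustrative sketch, not part of the vendored diff ---
// A standalone demonstration of the (1 << iota) / 2 trick used for Kind above: the
// first constant becomes 0 (1/2 truncates) and every later one is the next power of
// two, so token kinds can be tested in groups with a single bitmask, as the decoder
// does in its sequence checks. The constant names here are assumptions.
package main

import "fmt"

type kind uint16

const (
    invalid kind = (1 << iota) / 2 // 1/2 truncates to 0
    eof                            // 1
    null                           // 2
    boolean                        // 4
    number                         // 8
    str                            // 16
)

func main() {
    fmt.Println(invalid, eof, null, boolean, number, str) // 0 1 2 4 8 16
    isScalar := func(k kind) bool { return k&(null|boolean|number|str) != 0 }
    fmt.Println(isScalar(number), isScalar(eof)) // true false
}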
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 000000000..fbdf34873 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(indent string) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) 
+ } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. +func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. 
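// --- Illustrative sketch, not part of the vendored diff ---
// A standalone version of the float formatting policy that appendFloat above
// implements: non-finite values are emitted as the quoted strings protobuf JSON
// requires, and plain decimal notation is used unless the magnitude falls outside
// [1e-6, 1e21), mirroring encoding/json. The exponent-digit cleanup in the real
// code is omitted here; formatFloat is an assumed name.
package main

import (
    "fmt"
    "math"
    "strconv"
)

func formatFloat(n float64) string {
    switch {
    case math.IsNaN(n):
        return `"NaN"`
    case math.IsInf(n, +1):
        return `"Infinity"`
    case math.IsInf(n, -1):
        return `"-Infinity"`
    }
    format := byte('f')
    if abs := math.Abs(n); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
        format = 'e'
    }
    return strconv.FormatFloat(n, format, -1, 64)
}

func main() {
    fmt.Println(formatFloat(math.Inf(-1)), formatFloat(0.00025), formatFloat(1e22))
    // "-Infinity" 0.00025 1e+22
}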
Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index b1eeea507..c1866f3c1 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -11,10 +11,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" ) -// The MessageSet wire format is equivalent to a message defiend as follows, +// The MessageSet wire format is equivalent to a message defined as follows, // where each Item defines an extension field with a field number of 'type_id' // and content of 'message'. MessageSet extensions must be non-repeated message // fields. @@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool { return ok && xmd.IsMessageSet() } -// IsMessageSetExtension reports this field extends a MessageSet. +// IsMessageSetExtension reports this field properly extends a MessageSet. 
func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - if fd.Name() != ExtensionName { + switch { + case fd.Name() != ExtensionName: return false - } - if fd.FullName().Parent() != fd.Message().FullName() { + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): return false } - return IsMessageSet(fd.ContainingMessage()) -} - -// FindMessageSetExtension locates a MessageSet extension field by name. -// In text and JSON formats, the extension name used is the message itself. -// The extension field name is derived by appending ExtensionName. -func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { - name := s.Append(ExtensionName) - xt, err := r.FindExtensionByName(name) - if err != nil { - if err == preg.NotFound { - return nil, err - } - return nil, errors.Wrap(err, "%q", name) - } - if !IsMessageSetExtension(xt.TypeDescriptor()) { - return nil, preg.NotFound - } - return xt, nil + return true } // SizeField returns the size of a MessageSet item field containing an extension diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 16c02d7b6..38f1931c6 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.JSONName.Init(jsonName) + f.L1.StringName.InitJSON(jsonName) } case s == "packed": f.L1.HasPacked = true diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..37803773f 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index c4ba1c598..da289ccce 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -32,7 +32,6 @@ type Encoder struct { encoderState indent string - newline string // set to "\n" if len(indent) > 0 delims [2]byte outputASCII bool } @@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro return nil, errors.New("indent may only be composed of space and tab characters") } e.indent = indent - e.newline = "\n" } switch delims { case [2]byte{0, 0}: @@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // are used to represent both the proto string and bytes type. 
r = rune(in[0]) fallthrough - case r < ' ' || r == '"' || r == '\\': + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: out = append(out, '\\') switch r { case '"', '\\': @@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { out = strconv.AppendUint(out, uint64(r), 16) } in = in[n:] - case outputASCII && r >= utf8.RuneSelf: + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): out = append(out, '\\') if r <= math.MaxUint16 { out = append(out, 'u') @@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // escaping. If no characters need escaping, this returns the input length. func indexNeedEscapeInString(s string) int { for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { return i } } @@ -265,3 +263,8 @@ func (e *Encoder) Snapshot() encoderState { func (e *Encoder) Reset(es encoderState) { e.encoderState = es } + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go deleted file mode 100644 index 517c4e2a0..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldsort defines an ordering of fields. -// -// The ordering defined by this package matches the historic behavior of the proto -// package, placing extensions first and oneofs last. -// -// There is no guarantee about stability of the wire encoding, and users should not -// depend on the order defined in this package as it is subject to change without -// notice. -package fieldsort - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Less returns true if field a comes before field j in ordered wire marshal output. 
-func Less(a, b protoreflect.FieldDescriptor) bool { - ea := a.IsExtension() - eb := b.IsExtension() - oa := a.ContainingOneof() - ob := b.ContainingOneof() - switch { - case ea != eb: - return ea - case oa != nil && ob != nil: - if oa == ob { - return a.Number() < b.Number() - } - return oa.Index() < ob.Index() - case oa != nil && !oa.IsSynthetic(): - return false - case ob != nil && !ob.IsSynthetic(): - return true - default: - return a.Number() < b.Number() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index d02d770c9..b293b6947 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. // Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. package filedesc import ( diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 9385126fb..98ab142ae 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -99,15 +100,6 @@ func (fd *File) lazyInitOnce() { fd.mu.Unlock() } -// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code -// to be able to retrieve the raw descriptor. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (fd *File) ProtoLegacyRawDesc() []byte { - return fd.builder.RawDescriptor -} - // GoPackagePath is a pseudo-internal API for determining the Go package path // that this file descriptor is declared in. 
// @@ -207,7 +199,7 @@ type ( Number pref.FieldNumber Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers Kind pref.Kind - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions HasPacked bool // promoted from google.protobuf.FieldOptions @@ -277,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage { func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } -func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } @@ -373,7 +366,7 @@ type ( } ExtensionL2 struct { Options func() pref.ProtoMessage - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue @@ -391,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage { func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional @@ -506,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syn func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} -type jsonName struct { - has bool - once sync.Once - name string +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string } -// Init initializes the name. It is exported for use by other internal packages. -func (js *jsonName) Init(s string) { - js.has = true - js.name = s +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name } -func (js *jsonName) get(fd pref.FieldDescriptor) string { - if !js.has { - js.once.Do(func() { - js.name = strs.JSONCamelCase(string(fd.Name())) - }) - } - return js.name +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. 
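// --- Illustrative sketch, not part of the vendored diff ---
// A standalone version of the default JSON-name derivation that stringName.lazyInit
// above delegates to strs.JSONCamelCase: underscores are dropped and the lowercase
// letter that follows each underscore is upper-cased, so "foo_bar_baz" becomes
// "fooBarBaz". The helper name jsonCamelCase is an assumption for this sketch.
package main

import "fmt"

func jsonCamelCase(s string) string {
    out := make([]byte, 0, len(s))
    upper := false
    for i := 0; i < len(s); i++ {
        c := s[i]
        if c == '_' {
            upper = true
            continue
        }
        if upper && 'a' <= c && c <= 'z' {
            c -= 'a' - 'A'
        }
        upper = false
        out = append(out, c)
    }
    return string(out)
}

func main() {
    fmt.Println(jsonCamelCase("foo_bar_baz"), jsonCamelCase("k_9"))
    // fooBarBaz k9
}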
+ var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s } +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e672233e7..198451e3e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -451,7 +451,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_JsonName_field_number: - fd.L1.JSONName.Init(sb.MakeString(v)) + fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: @@ -551,7 +551,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] switch num { case genid.FieldDescriptorProto_JsonName_field_number: - xd.L2.JSONName.Init(sb.MakeString(v)) + xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index c876cd34d..aa294fff9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,9 +6,12 @@ package filedesc import ( "fmt" + "math" "sort" "sync" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/errors" @@ -245,6 +248,7 @@ type OneofFields struct { once sync.Once byName map[pref.Name]pref.FieldDescriptor // protected by once byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once } @@ -252,6 +256,7 @@ func (p *OneofFields) Len() int { return func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } func (p 
*OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} @@ -261,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields { if len(p.List) > 0 { p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f p.byNum[f.Number()] = f } } @@ -274,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields { } type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } } +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } + // Update the next index for all locations. 
+ p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 6a8825e80..30db19fdc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -142,6 +142,7 @@ type Fields struct { once sync.Once byName map[protoreflect.Name]*Field // protected by once byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once byNum map[protoreflect.FieldNumber]*Field // protected by once } @@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { } return nil } +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { if d := p.lazyInit().byNum[n]; d != nil { return d @@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields { if len(p.List) > 0 { p.byName = make(map[protoreflect.Name]*Field, len(p.List)) p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) for i := range p.List { d := &p.List[i] @@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byJSON[d.JSONName()]; !ok { p.byJSON[d.JSONName()] = d } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
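// --- Illustrative sketch, not part of the vendored diff ---
// A standalone version of the pathKey idea above: slices cannot be map keys, so short
// paths are packed into a fixed, comparable array (with the length stored in the last
// byte) and anything that does not fit, or has out-of-range components, falls back to
// a string form. The names key and newKey are assumptions for this sketch.
package main

import "fmt"

type key struct {
    arr [16]uint8 // first len-1 elements hold the path; the last element is the length
    str string    // fallback when the path does not fit or has large components
}

func newKey(path []int32) (k key) {
    if len(path) < len(k.arr) {
        for i, p := range path {
            if p < 0 || p >= 255 {
                return key{str: fmt.Sprint(path)}
            }
            k.arr[i] = uint8(p)
        }
        k.arr[len(k.arr)-1] = uint8(len(path))
        return k
    }
    return key{str: fmt.Sprint(path)}
}

func main() {
    seen := map[key]bool{}
    seen[newKey([]int32{4, 0, 2, 1})] = true
    fmt.Println(seen[newKey([]int32{4, 0, 2, 1})], seen[newKey([]int32{4, 0})]) // true false
}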
+//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index b5974528d..abee5f30e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -167,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), "") + return legacyLoadMessageType(reflect.TypeOf(m), "") } // MessageStringOf returns the message value as a string, diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index c00744d38..cb4b482d1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -10,6 +10,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -20,6 +21,7 @@ type errInvalidUTF8 struct{} func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. // @@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if p.Elem().IsNil() { p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) @@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: v, @@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: b, @@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } m := reflect.New(f.mi.GoReflectType.Elem()).Interface() mp := pointerOfIface(m) @@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt } b, n := 
protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index ff198d0a1..1a509b63e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -15,13 +15,13 @@ import ( ) // sizeBool returns the size of wire encoding a bool pointer as a Bool. -func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt } // consumeBool wire decodes a bool pointer as a Bool. -func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bool() = protowire.DecodeBool(v) out.n = n @@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{ // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. // The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() if v == false { return 0 @@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { // appendBoolNoZero wire encodes a bool pointer as a Bool. // The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() if v == false { return b, nil @@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{ // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. // It panics if the pointer is nil. 
-func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.BoolPtr() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBoolPtr wire encodes a *bool pointer as a Bool. // It panics if the pointer is nil. -func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.BoolPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeBoolPtr wire decodes a *bool pointer as a Bool. -func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.BoolPtr() if *vp == nil { @@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{ } // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) @@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBoolSlice encodes a []bool pointer as a repeated Bool. -func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. 
-func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeBool(v)) b = b[n:] @@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeBool(v)) out.n = n @@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{ } // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. -func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() if len(s) == 0 { return 0 @@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i } // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. -func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() if len(s) == 0 { return b, nil @@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{ } // sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) } // appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) return b, nil } // consumeBoolValue decodes a bool value as a Bool. 
-func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil @@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{ } // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. -func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) b = b[n:] @@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) out.n = n @@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{ } // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
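The Bool slice coders above show the two wire shapes a repeated varint field can take: a packed BytesType payload of back-to-back varints, or one VarintType element per record. A small stand-alone sketch of the packed branch follows (an assumption-level illustration using the public protowire.ParseError, where the vendored code now returns its internal errDecode):

// Sketch only; not part of the vendored patch. Decodes a packed repeated
// bool payload the way the BytesType branch of consumeBoolSlice does.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func consumePackedBools(b []byte) ([]bool, error) {
	// A packed field is a length-delimited blob of consecutive varints.
	body, n := protowire.ConsumeBytes(b)
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	var out []bool
	for len(body) > 0 {
		v, m := protowire.ConsumeVarint(body)
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		out = append(out, protowire.DecodeBool(v))
		body = body[m:]
	}
	return out, nil
}

func main() {
	// Build a packed payload for [true, false, true].
	var payload []byte
	for _, v := range []bool{true, false, true} {
		payload = protowire.AppendVarint(payload, protowire.EncodeBool(v))
	}
	b := protowire.AppendBytes(nil, payload)

	got, err := consumePackedBools(b)
	fmt.Println(got, err) // [true false true] <nil>
}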
-func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. -func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{ } // sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Enum())) } // appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Enum())) return b, nil } // consumeEnumValue decodes a value as a Enum. -func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil @@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{ } // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. -func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
-func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) b = b[n:] @@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) out.n = n @@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{ } // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. -func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{ } // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32 wire encodes a int32 pointer as a Int32. -func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt32 wire decodes a int32 pointer as a Int32. 
-func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{ // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. // The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt32NoZero wire encodes a int32 pointer as a Int32. // The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{ // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. // It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32Ptr wire encodes a *int32 pointer as a Int32. // It panics if the pointer is nil. -func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{ } // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
-func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{ } // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. -func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{ } // sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) } // appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(int32(v.Int()))) return b, nil } // consumeInt32Value decodes a int32 value as a Int32. 
-func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{ } // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. -func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{ } // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
-func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. -func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{ } // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint32 wire decodes a int32 pointer as a Sint32. -func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) out.n = n @@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{ // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. // The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint32NoZero wire encodes a int32 pointer as a Sint32. // The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{ // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. // It panics if the pointer is nil. 
-func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32Ptr wire encodes a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{ } // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. -func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) @@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint32Slice encodes a []int32 pointer as a repeated Sint32. -func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
-func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) b = b[n:] @@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) out.n = n @@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{ } // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. -func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{ } // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) } // appendSint32Value encodes a int32 value as a Sint32. -func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) return b, nil } // consumeSint32Value decodes a int32 value as a Sint32. 
-func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil @@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{ } // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. -func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) b = b[n:] @@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) out.n = n @@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{ } // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
-func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. -func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{ } // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint32 wire decodes a uint32 pointer as a Uint32. -func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = uint32(v) out.n = n @@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{ // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. // The zero value is not encoded. -func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint32NoZero wire encodes a uint32 pointer as a Uint32. // The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{ // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. // It panics if the pointer is nil. 
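The Sint32/Sint64 coders above differ from their Int32/Int64 counterparts only in routing values through protowire.EncodeZigZag and protowire.DecodeZigZag, which map small negative numbers to small unsigned varints. A quick round-trip sketch of that mapping (illustrative only, not part of the patch):

// Sketch only; shows the zig-zag mapping the sint* coders rely on.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		enc := protowire.EncodeZigZag(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, enc, protowire.DecodeZigZag(enc))
	}
	// The 32-bit coders additionally mask with math.MaxUint32 before
	// decoding, as consumeSint32 above does, to stay within 32 bits.
}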
-func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. -func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{ } // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. -func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. 
-func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, uint32(v)) b = b[n:] @@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, uint32(v)) out.n = n @@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{ } // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. -func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. -func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{ } // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) } // appendUint32Value encodes a uint32 value as a Uint32. -func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) return b, nil } // consumeUint32Value decodes a uint32 value as a Uint32. 
-func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{ } // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. -func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{ } // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. 
-func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. -func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{ } // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt64 wire decodes a int64 pointer as a Int64. -func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{ // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. // The zero value is not encoded. -func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt64NoZero wire encodes a int64 pointer as a Int64. // The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{ // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. // It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64Ptr wire encodes a *int64 pointer as a Int64. 
// It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt64Ptr wire decodes a *int64 pointer as a Int64. -func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{ } // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt64Slice encodes a []int64 pointer as a repeated Int64. -func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. -func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{ } // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. 
-func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{ } // sizeInt64Value returns the size of wire encoding a int64 value as a Int64. -func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Int())) } // appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Int())) return b, nil } // consumeInt64Value decodes a int64 value as a Int64. -func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{ } // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. -func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. 
-func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{ } // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. -func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{ } // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64 wire encodes a int64 pointer as a Sint64. -func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint64 wire decodes a int64 pointer as a Sint64. 
-func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = protowire.DecodeZigZag(v) out.n = n @@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{ // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. // The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint64NoZero wire encodes a int64 pointer as a Sint64. // The zero value is not encoded. -func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{ // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64Ptr wire encodes a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. -func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{ } // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. 
-func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) @@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. -func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeZigZag(v)) b = b[n:] @@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeZigZag(v)) out.n = n @@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{ } // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. -func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. -func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{ } // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) } // appendSint64Value encodes a int64 value as a Sint64. 
-func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) return b, nil } // consumeSint64Value decodes a int64 value as a Sint64. -func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil @@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{ } // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
-func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) b = b[n:] @@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) out.n = n @@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{ } // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. -func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. -func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{ } // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() return f.tagsize + protowire.SizeVarint(v) } // appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint64 wire decodes a uint64 pointer as a Uint64. 
-func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{ // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. // The zero value is not encoded. -func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint64NoZero wire encodes a uint64 pointer as a Uint64. // The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{ // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint64Ptr() return f.tagsize + protowire.SizeVarint(v) } // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. -func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{ } // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. 
-func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(v) @@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. -func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. -func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{ } // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. -func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{ } // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. -func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(v.Uint()) } // appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, v.Uint()) return b, nil } // consumeUint64Value decodes a uint64 value as a Uint64. 
-func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{ } // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. -func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. -func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{ } // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
-func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{ } // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32 wire encodes a int32 pointer as a Sfixed32. -func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. -func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{ // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. // The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. // The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{ // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. 
-func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{ } // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. -func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{ } // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. 
-func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{ } // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. -func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Int())) return b, nil } // consumeSfixed32Value decodes a int32 value as a Sfixed32. -func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{ } // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. -func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. 
-func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{ } // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. -func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{ } // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed32 wire decodes a uint32 pointer as a Fixed32. 
-func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = v out.n = n @@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{ // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. // The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. // The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{ // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. -func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{ } // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. 
-func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. -func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{ } // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{ } // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. -func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFixed32Value encodes a uint32 value as a Fixed32. -func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Uint())) return b, nil } // consumeFixed32Value decodes a uint32 value as a Fixed32. 
-func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{ } // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. -func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. -func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{ } // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. -func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. 
-func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{ } // sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeFloat wire decodes a float32 pointer as a Float. -func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float32() = math.Float32frombits(v) out.n = n @@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{ // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. // The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendFloatNoZero wire encodes a float32 pointer as a Float. // The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{ // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. // It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloatPtr wire encodes a *float32 pointer as a Float. // It panics if the pointer is nil. -func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeFloatPtr wire decodes a *float32 pointer as a Float. 
-func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float32Ptr() if *vp == nil { @@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{ } // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFloatSlice encodes a []float32 pointer as a repeated Float. -func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. -func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float32frombits(v)) b = b[n:] @@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float32frombits(v)) out.n = n @@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{ } // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() if len(s) == 0 { return 0 @@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() if len(s) == 0 { return b, nil @@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{ } // sizeFloatValue returns the size of wire encoding a float32 value as a Float. 
-func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFloatValue encodes a float32 value as a Float. -func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) return b, nil } // consumeFloatValue decodes a float32 value as a Float. -func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil @@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{ } // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFloatSliceValue encodes a []float32 value as a repeated Float. -func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. 
-func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) b = b[n:] @@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) out.n = n @@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{ } // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. -func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. -func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{ } // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
-func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{ // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. // The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. // The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{ // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. -func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{ } // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. 
-func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. -func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{ } // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{ } // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. -func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendSfixed64Value encodes a int64 value as a Sfixed64. -func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, uint64(v.Int())) return b, nil } // consumeSfixed64Value decodes a int64 value as a Sfixed64. 
-func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{ } // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. -func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. -func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{ } // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. -func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. 
-func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{ } // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed64 wire decodes a uint64 pointer as a Fixed64. -func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{ // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. // The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. // The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{ // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. 
-func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{ } // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. -func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. -func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{ } // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{ } // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. 
-func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendFixed64Value encodes a uint64 value as a Fixed64. -func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, v.Uint()) return b, nil } // consumeFixed64Value decodes a uint64 value as a Fixed64. -func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{ } // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. -func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. 
-func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{ } // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. -func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. -func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{ } // sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeDouble wire decodes a float64 pointer as a Double. 
-func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float64() = math.Float64frombits(v) out.n = n @@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{ // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. // The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendDoubleNoZero wire encodes a float64 pointer as a Double. // The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{ // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. // It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDoublePtr wire encodes a *float64 pointer as a Double. // It panics if the pointer is nil. -func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeDoublePtr wire decodes a *float64 pointer as a Double. -func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float64Ptr() if *vp == nil { @@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{ } // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendDoubleSlice encodes a []float64 pointer as a repeated Double. 
-func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. -func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float64frombits(v)) b = b[n:] @@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float64frombits(v)) out.n = n @@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{ } // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() if len(s) == 0 { return 0 @@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. -func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() if len(s) == 0 { return b, nil @@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{ } // sizeDoubleValue returns the size of wire encoding a float64 value as a Double. -func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendDoubleValue encodes a float64 value as a Double. -func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) return b, nil } // consumeDoubleValue decodes a float64 value as a Double. 
-func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil @@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{ } // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendDoubleSliceValue encodes a []float64 value as a repeated Double. -func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. -func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) b = b[n:] @@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) out.n = n @@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{ } // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. -func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. 
-func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{ } // sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() return f.tagsize + protowire.SizeBytes(len(v)) } // appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeString wire decodes a string pointer as a String. -func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{ } // appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO } // consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{ // sizeStringNoZero returns the size of wire encoding a string pointer as a String. // The zero value is not encoded. 
-func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() if len(v) == 0 { return 0 @@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendStringNoZero wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{ // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ // sizeStringPtr returns the size of wire encoding a *string pointer as a String. // It panics if the pointer is nil. -func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.StringPtr() return f.tagsize + protowire.SizeBytes(len(v)) } // appendStringPtr wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{ // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh } // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. 
-func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{ } // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. -func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.StringSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendStringSlice encodes a []string pointer as a repeated String. -func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeStringSlice wire decodes a []string pointer as a repeated String. -func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.StringSlice() if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *sp = append(*sp, v) + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{ } // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. 
-func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *sp = append(*sp, v) + sp := p.StringSlice() + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{ } // sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.String())) } // appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) return b, nil } // consumeStringValue decodes a string value as a String. -func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfString(string(v)), out, nil @@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{ } // appendStringValueValidateUTF8 encodes a string value as a String. -func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) if !utf8.ValidString(v.String()) { @@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6 } // consumeStringValueValidateUTF8 decodes a string value as a String. 
-func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return protoreflect.Value{}, out, errInvalidUTF8{} } out.n = n @@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{ } // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeStringSliceValue wire decodes a []string value as a repeated String. -func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfString(string(v))) out.n = n @@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{ } // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() return f.tagsize + protowire.SizeBytes(len(v)) } // appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeBytes wire decodes a []byte pointer as a Bytes. 
-func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(emptyBuf[:], v...) out.n = n @@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{ } // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp } // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{ // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. // The zero value is not encoded. -func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() if len(v) == 0 { return 0 @@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendBytesNoZero wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) // consumeBytesNoZero wire decodes a []byte pointer as a Bytes. // The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(([]byte)(nil), v...) out.n = n @@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{ // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. 
-func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ } // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BytesSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BytesSlice() if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n @@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{ } // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars } // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. 
-func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} } + sp := p.BytesSlice() *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n return out, nil @@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ } // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. -func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.Bytes())) } // appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendBytes(b, v.Bytes()) return b, nil } // consumeBytesValue decodes a []byte value as a Bytes. -func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil @@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{ } // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. -func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. 
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) out.n = n diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 44885a761..c1245fef4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -5,7 +5,6 @@ package impl import ( - "errors" "reflect" "sort" @@ -118,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -127,10 +126,10 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -157,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err @@ -175,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -184,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -208,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi var v []byte v, n = protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var o unmarshalOutput o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) @@ -221,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by 
a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 0e176d565..cd40527ff 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/order" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -27,6 +27,7 @@ type coderMessageInfo struct { coderFields map[protowire.Number]*coderFieldInfo sizecacheOffset offset unknownOffset offset + unknownPtrKind bool extensionOffset offset needsInitCheck bool isMessageSet bool @@ -47,9 +48,20 @@ type coderFieldInfo struct { } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = si.sizecacheOffset - mi.unknownOffset = si.unknownOffset - mi.extensionOffset = si.extensionOffset + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() @@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): @@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { sort.Slice(mi.orderedCoderFields, func(i, j int) bool { fi := fields.ByNumber(mi.orderedCoderFields[i].num) fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return fieldsort.Less(fi, fj) + return order.LegacyFieldOrder(fi, fj) }) } @@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.methods.Merge = mi.merge } } + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. +func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. +func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index cfb68e12f..b7a23faf1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } - unknown := *p.Apply(mi.unknownOffset).Bytes() - size += messageset.SizeUnknown(unknown) + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } return size } @@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions } } - unknown := *p.Apply(mi.unknownOffset).Bytes() - b, err := messageset.AppendUnknown(b, unknown) - if err != nil { - return b, err + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } } return b, nil @@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt *ep = make(map[int32]ExtensionField) } ext := *ep - unknown := p.Apply(mi.unknownOffset).Bytes() initialized := true err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) if err == errUnknown { - *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) - 
*unknown = append(*unknown, v...) + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) return nil } if !o.initialized { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 86f7dc3c9..145c577bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl @@ -30,7 +31,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } p.v.Elem().SetInt(int64(v)) out.n = n @@ -130,12 +131,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) @@ -150,7 +151,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 36a90dff3..acd61bb50 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } if m, ok := v.Interface().(pref.ProtoMessage); ok { return pref.ValueOfMessage(m.ProtoReflect()) } @@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } if rv.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) } @@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } return rv.Type() == c.goType } @@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { } func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } return c.PBValueOf(reflect.New(c.goType.Elem())) } func (c *messageConverter) Zero() pref.Value { return c.PBValueOf(reflect.Zero(c.goType)) } + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. 
+func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 85ba1d3b3..c65b0325c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -17,12 +17,16 @@ import ( piface "google.golang.org/protobuf/runtime/protoiface" ) +var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") + type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags resolver interface { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -42,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool { var lazyUnmarshalOptions = unmarshalOptions{ resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -60,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) var flags piface.UnmarshalOutputFlags if out.initialized { @@ -80,6 +86,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } @@ -100,13 +110,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. var n int tag, n = protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } b = b[n:] } var num protowire.Number if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errors.New("invalid field number") + return out, errDecode } else { num = protowire.Number(n) } @@ -114,7 +124,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if wtyp == protowire.EndGroupType { if num != groupTag { - return out, errors.New("mismatching end group marker") + return out, errDecode } groupTag = 0 break @@ -170,10 +180,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. } n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := p.Apply(mi.unknownOffset).Bytes() + u := mi.mutableUnknownBytes(p) *u = protowire.AppendTag(*u, num, wtyp) *u = append(*u, b[:n]...) } @@ -181,7 +191,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. 
b = b[n:] } if groupTag != 0 { - return out, errors.New("missing end group marker") + return out, errDecode } if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { initialized = false @@ -221,7 +231,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, nil } case ValidationInvalid: - return out, errors.New("invalid wire format") + return out, errDecode case ValidationUnknown: } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 8c8a794c6..845c67d6e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int size += f.funcs.size(fptr, f, opts) } if mi.unknownOffset.IsValid() { - u := *p.Apply(mi.unknownOffset).Bytes() - size += len(u) + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } } if mi.sizecacheOffset.IsValid() { if size > math.MaxInt32 { @@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt } } if mi.unknownOffset.IsValid() && !mi.isMessageSet { - u := *p.Apply(mi.unknownOffset).Bytes() - b = append(b, u...) + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) + } } return b, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index c3d741c2f..e3fb0b578 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), name) + return legacyLoadMessageType(reflect.TypeOf(m), name) } // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 61757ce50..49e723161 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber { retu func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } func (x placeholderExtension) HasPresence() bool { return false } func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 06c68e117..029feeefd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -24,14 +24,24 @@ import ( // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. func legacyWrapMessage(v reflect.Value) pref.Message { - typ := v.Type() - if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} } - mt := legacyLoadMessageInfo(typ, "") + mt := legacyLoadMessageInfo(t, "") return mt.MessageOf(v.Interface()) } +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, @@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { GoReflectType: t, } + var hasMarshal, hasUnmarshal bool v := reflect.Zero(t).Interface() - if _, ok := v.(legacyMarshaler); ok { + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { mi.methods.Marshal = legacyMarshal // We have no way to tell whether the type's Marshal method @@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // calling Marshal methods when present. 
mi.methods.Flags |= piface.SupportMarshalDeterministic } - if _, ok := v.(legacyUnmarshaler); ok { + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal } - if _, ok := v.(legacyMerger); ok { + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { mi.methods.Merge = legacyMerge } @@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor // LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must be a *struct kind and not implement the v2 API already. +// which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { @@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr // If the Go type has no fields, then this might be a proto3 empty message // from before the size cache was added. If there are any fields, check to // see that at least one of them looks like something we generated. - if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) } } @@ -370,7 +383,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var legacyProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &piface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -401,18 +414,47 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { + return piface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { return piface.MergeOutput{} } - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. 
+ // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } return piface.MergeOutput{Flags: piface.MergeComplete} } @@ -422,6 +464,9 @@ type aberrantMessageType struct { } func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) Zero() pref.Message { @@ -443,6 +488,17 @@ type aberrantMessage struct { v reflect.Value } +// Reset implements the v1 proto.Message.Reset method. +func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + func (m aberrantMessage) ProtoReflect() pref.Message { return m } @@ -454,33 +510,40 @@ func (m aberrantMessage) Type() pref.MessageType { return aberrantMessageType{m.v.Type()} } func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } return aberrantMessage{reflect.Zero(m.v.Type())} } func (m aberrantMessage) Interface() pref.ProtoMessage { return m } func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return } func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - panic("invalid field descriptor") + return false } func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid field descriptor") + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid field descriptor") + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid oneof descriptor") + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) GetUnknown() pref.RawFields { return nil @@ -489,13 +552,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { - // An invalid message is a read-only, empty message. Since we don't know anything - // about the alleged contents of this message, we can't say with confidence that - // it is invalid in this sense. Therefore, report it as valid. 
- return true + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false } func (m aberrantMessage) ProtoMethods() *piface.Methods { - return legacyProtoMethods + return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index cdc4267df..c65bbc044 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } if mi.unknownOffset.IsValid() { - du := dst.Apply(mi.unknownOffset).Bytes() - su := src.Apply(mi.unknownOffset).Bytes() - if len(*su) > 0 { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) *du = append(*du, *su...) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index c026a9818..a104e28e8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = []byte + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte ExtensionFields = map[int32]ExtensionField ) var ( sizecacheType = reflect.TypeOf(SizeCache(0)) weakFieldsType = reflect.TypeOf(WeakFields(nil)) - unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) ) type structInfo struct { sizecacheOffset offset + sizecacheType reflect.Type weakOffset offset + weakType reflect.Type unknownOffset offset + unknownType reflect.Type extensionOffset offset + extensionType reflect.Type fieldsByNumber map[pref.FieldNumber]reflect.StructField oneofsByName map[pref.Name]reflect.StructField @@ -151,18 +159,22 @@ fieldLoop: case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: - if f.Type == unknownFieldsType { + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type } default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { @@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message { func (mi *MessageInfo) Zero() protoreflect.Message { 
return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) } -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 0f4b8db76..9488b7261 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" + "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -16,6 +17,11 @@ type reflectMessageInfo struct { fields map[pref.FieldNumber]*fieldInfo oneofs map[pref.Name]*oneofInfo + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) // It provides faster access to the fieldInfo, but may be incomplete. 
@@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { mi.makeKnownFieldsFunc(si) mi.makeUnknownFieldsFunc(t, si) mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) } // makeKnownFieldsFunc generates functions for operations that can be performed @@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } var fi fieldInfo switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) case fd.IsWeak(): fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: fi = fieldInfoForScalar(fd, fs, mi.Exporter) @@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { i++ } } + + // Introduce instability to iteration order, but keep it deterministic. + if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } } func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - mi.getUnknown = func(pointer) pref.RawFields { return nil } - mi.setUnknown = func(pointer, pref.RawFields) { return } - if si.unknownOffset.IsValid() { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. mi.getUnknown = func(p pointer) pref.RawFields { if p.IsNil() { return nil } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - return pref.RawFields(*rv.Interface().(*[]byte)) + return *p.Apply(mi.unknownOffset).Bytes() } mi.setUnknown = func(p pointer, b pref.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - *rv.Interface().(*[]byte) = []byte(b) + *p.Apply(mi.unknownOffset).Bytes() = b } - } else { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+ mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: mi.getUnknown = func(pointer) pref.RawFields { return nil } @@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { } } } +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} type extensionMap map[int32]ExtensionField @@ -306,7 +397,6 @@ var ( // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - // TODO: Switch the input to be an opaque Pointer. if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { func (m *messageReflectWrapper) pointer() pointer { return m.p } func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } +// Reset implements the v1 proto.Message.Reset method. 
+func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} func (m *messageIfaceWrapper) ProtoReflect() pref.Message { return (*messageReflectWrapper)(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 23124a86e..343cf8721 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -28,6 +28,39 @@ type fieldInfo struct { newField func() pref.Value } +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { @@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv.Set(reflect.New(ot)) } rv = rv.Elem().Elem().Field(0) - if rv.IsNil() { + if rv.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) @@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. 
+ nullable = false } if ft.Kind() == reflect.Ptr { ft = ft.Elem() @@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo return false } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } return !rv.IsNil() }, clear: func(p pointer) { @@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo set: func(p pointer, v pref.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, mutable: func(p pointer) pref.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) @@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } return oi } + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 67b4ede67..4c491bdf4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package impl @@ -121,6 +122,7 @@ func (p pointer) String() *string { return p.v.Interface().(*string) } func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } func (p pointer) Extensions() *map[int32]ExtensionField { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 088aa85d4..ee0e0573e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl @@ -109,6 +110,7 @@ func (p pointer) String() *string { return (*string)(p.p) func (p pointer) StringPtr() **string { return (**string)(p.p) } func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go deleted file mode 100644 index a3de1cf32..000000000 --- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mapsort provides sorted access to maps. -package mapsort - -import ( - "sort" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Range iterates over every map entry in sorted key order, -// calling f for each key and value encountered. 
-func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - var keys []protoreflect.MapKey - mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { - keys = append(keys, key) - return true - }) - sort.Slice(keys, func(i, j int) bool { - switch keyKind { - case protoreflect.BoolKind: - return !keys[i].Bool() && keys[j].Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return keys[i].Int() < keys[j].Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, - protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return keys[i].Uint() < keys[j].Uint() - case protoreflect.StringKind: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keyKind.String()) - } - }) - for _, key := range keys { - if !f(key, mapv.Get(key)) { - break - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000..2a24953f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. 
+// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 000000000..c8090e0c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called everytime a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called everytime a map entry is visited. + VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. 
+ es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019..56a8a4ed3 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 72cf770b4..3d40d5249 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 25 + Minor = 28 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 42fc5195e..11bf7173b 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -42,16 +42,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -61,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. 
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -84,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { @@ -116,10 +133,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) // Parse the tag (field number and wire type). num, wtyp, tagLen := protowire.ConsumeTag(b) if tagLen < 0 { - return protowire.ParseError(tagLen) + return errDecode } if num > protowire.MaxValidNumber { - return errors.New("invalid field number") + return errDecode } // Find the field descriptor for this field number. @@ -159,7 +176,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) } valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) if valLen < 0 { - return protowire.ParseError(valLen) + return errDecode } if !o.DiscardUnknown { m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) @@ -194,7 +211,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto } b, n = protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } var ( keyField = fd.MapKey() @@ -213,10 +230,10 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if num > protowire.MaxValidNumber { - return 0, errors.New("invalid field number") + return 0, errDecode } b = b[n:] err = errUnknown @@ -246,7 +263,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } } else if err != nil { return 0, err @@ -272,3 +289,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto // to the unknown field set of a message. It is never returned from an exported // function. 
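Aside for reviewers of this bump, not part of the vendored sources: the protobuf v1.28 decode changes above add a RecursionLimit knob to proto.UnmarshalOptions, with proto.Unmarshal falling back to protowire.DefaultRecursionLimit. A minimal sketch of how a consumer of this module could cap nesting depth when parsing untrusted input; structpb.Value is used purely as an illustrative message type.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	raw, err := proto.Marshal(structpb.NewStringValue("hello"))
	if err != nil {
		panic(err)
	}

	// Plain proto.Unmarshal now applies protowire.DefaultRecursionLimit implicitly.
	var v structpb.Value
	if err := proto.Unmarshal(raw, &v); err != nil {
		panic(err)
	}

	// Callers parsing untrusted, deeply nested input can pick a tighter cap;
	// exceeding it fails with "exceeded max recursion depth".
	opts := proto.UnmarshalOptions{RecursionLimit: 32}
	var v2 structpb.Value
	if err := opts.Unmarshal(raw, &v2); err != nil {
		panic(err)
	}
	fmt.Println(v2.GetStringValue())
}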
var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go index d6dc904dc..301eeb20f 100644 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil case protoreflect.EnumKind: @@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil case protoreflect.Int32Kind: @@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Sint32Kind: @@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil case protoreflect.Uint32Kind: @@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.Int64Kind: @@ -72,7 +72,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Sint64Kind: @@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil case protoreflect.Uint64Kind: @@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.Sfixed32Kind: @@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Fixed32Kind: @@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.FloatKind: @@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp 
protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil case protoreflect.Sfixed64Kind: @@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Fixed64Kind: @@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.DoubleKind: @@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil case protoreflect.StringKind: @@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) @@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil case protoreflect.MessageKind: @@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil case protoreflect.GroupKind: @@ -183,7 +183,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil default: @@ -197,12 +197,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) @@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) return n, nil @@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] 
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) @@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) return n, nil @@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) @@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) return n, nil @@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := 
protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) @@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) return n, nil @@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) @@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) return n, nil @@ -472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, 
protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) @@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) return n, nil @@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return 0, errors.InvalidUTF8(string(fd.FullName())) @@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) return n, nil @@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { @@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 7b47a1180..d18239c23 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,9 @@ package proto import ( - "sort" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - 
"google.golang.org/protobuf/internal/fieldsort" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" @@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] if messageset.IsMessageSet(m.Descriptor()) { return o.marshalMessageSet(b, m) } - // There are many choices for what order we visit fields in. The default one here - // is chosen for reasonable efficiency and simplicity given the protoreflect API. - // It is not deterministic, since Message.Range does not return fields in any - // defined order. - // - // When using deterministic serialization, we sort the known fields. + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalField(b, fd, v) return err == nil }) @@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] return b, nil } -// rangeFields visits fields in a defined order when deterministic serialization is enabled. -func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if !o.Deterministic { - m.Range(f) - return - } - var fds []protoreflect.FieldDescriptor - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fds = append(fds, fd) - return true - }) - sort.Slice(fds, func(a, b int) bool { - return fieldsort.Less(fds[a], fds[b]) - }) - for _, fd := range fds { - if !f(fd, m.Get(fd)) { - break - } - } -} - func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { switch { case fd.IsList(): @@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { keyf := fd.MapKey() valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } var err error - o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) var pos int b, pos = appendSpeculativeLength(b) @@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma return b, err } -func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - if !o.Deterministic { - mapv.Range(f) - return - } - mapsort.Range(mapv, kind, f) -} - // When encoding length-prefixed fields, we speculatively set aside some number of bytes // for the length, encode the data, and then encode the length (shifting the data if necessary // to make room). 
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 10902bd85..4dba2b969 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { // equalValue compares two singular values. func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.Message() != nil: - return equalMessage(x.Message(), y.Message()) - case fd.Kind() == pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() } diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 1d692c3a8..312d5d45c 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b if !flags.ProtoLegacy { return b, errors.New("no support for message_set_wire_format") } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalMessageSetField(b, fd, v) return err == nil }) diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index ca14b09c3..1f0d183b1 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -32,3 +32,12 @@ var Error error func init() { Error = errors.Error } + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. 
+func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6..465e057b3 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d4320..494d6ceef 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..e4dfb1205 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. 
+ // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. 
+ f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..37efda1af --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..cebb36cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..9af1d5648 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
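+   // For example, in an enum named "Foo", the values "FOO_BAR" and "BAR"
+   // would reduce to the same name once the case-insensitive "foo" prefix
+   // is stripped, and so they conflict unless they alias the same number.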
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
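+   // For example, fields named "foo_bar" and "FooBar" both reduce to
+   // "foobar" and therefore conflict under proto3.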
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
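+// For example, a proto2 declaration "optional group Foo = 1 { ... }" yields a
+// message named "Foo" and a field that must be named "foo" in the same scope.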
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..a7c5ceffc --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. +func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. 
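+// For example, a unary method that accepts "pkg.Req" and returns "pkg.Resp" is
+// encoded with an input_type of ".pkg.Req" and an output_type of ".pkg.Resp".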
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9..d5d5af6eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 32ea3d98c..121ba3a07 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -4,6 +4,10 @@ package protoreflect +import ( + "strconv" +) + // SourceLocations is a list of source locations. type SourceLocations interface { // Len reports the number of source locations in the proto file. @@ -11,9 +15,20 @@ type SourceLocations interface { // Get returns the ith SourceLocation. It panics if out of bounds. Get(int) SourceLocation - doNotImplement + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation - // TODO: Add ByPath and ByDescriptor helper methods. + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement } // SourceLocation describes a source location and @@ -39,6 +54,10 @@ type SourceLocation struct { LeadingComments string // TrailingComments is the trailing attached comment for the declaration. TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int } // SourcePath identifies part of a file descriptor for a source location. @@ -48,5 +67,62 @@ type SourceLocation struct { // See google.protobuf.SourceCodeInfo.Location.path. type SourcePath []int32 -// TODO: Add SourcePath.String method to pretty-print the path. For example: -// ".message_type[6].nested_type[15].field[3]" +// Equal reports whether p1 equals p2. 
+func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000..b03c1223c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = 
p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, 
"end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) 
appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5be14a725..8e53c44a9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -232,11 +232,15 @@ type MessageDescriptor interface { type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } // MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. New() Message // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. Zero() Message // Descriptor returns the message descriptor. @@ -245,6 +249,26 @@ type MessageType interface { Descriptor() MessageDescriptor } +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + // MessageDescriptors is a list of message declarations. type MessageDescriptors interface { // Len reports the number of messages. @@ -279,8 +303,15 @@ type FieldDescriptor interface { // JSONName reports the name used for JSON serialization. // It is usually the camel-cased form of the field name. 
+ // Extension fields are represented by the full name surrounded by brackets. JSONName() string + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + // HasPresence reports whether the field distinguishes between unpopulated // and default values. HasPresence() bool @@ -371,6 +402,9 @@ type FieldDescriptors interface { // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. // It returns nil if not found. ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor // ByNumber returns the FieldDescriptor for a field numbered n. // It returns nil if not found. ByNumber(n FieldNumber) FieldDescriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1..7ced876f4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724..eb7764c30 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,31 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. 
type Value value // The protoreflect API uses a custom Value union type instead of interface{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdca..702ddf22a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 5e5f96716..59f024c44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -17,24 +17,49 @@ package protoregistry import ( "fmt" - "log" + "os" "strings" "sync" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" ) +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // ignoreConflict reports whether to ignore a registration conflict // given the descriptor being registered and the error. // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - log.Printf(""+ - "WARNING: %v\n"+ - "A future release will panic on registration conflicts. See:\n"+ - "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ - "\n", err) - return true + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } } var globalMutex sync.RWMutex @@ -69,7 +94,8 @@ type Files struct { // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
descsByName map[protoreflect.FullName]interface{} - filesByPath map[string]protoreflect.FileDescriptor + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int } type packageDescriptor struct { @@ -92,48 +118,16 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } - r.filesByPath = make(map[string]protoreflect.FileDescriptor) + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) } path := file.Path() - if prev := r.filesByPath[path]; prev != nil { - // TODO: Remove this after some soak-in period after moving these types. - var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - } - if r == GlobalFiles && prevPath != "" { - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) - } - + if prev := r.filesByPath[path]; len(prev) > 0 { + r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err } - return err } for name := file.Package(); name != ""; name = name.Parent() { @@ -174,10 +168,52 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { r.descsByName[d.FullName()] = d }) - r.filesByPath[path] = file + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ return nil } +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
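+// For example, registering "google/protobuf/field_mask.proto" from both this
+// module and an older genproto release panics with instructions to upgrade
+// the google.golang.org/genproto dependency.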
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + // FindDescriptorByName looks up a descriptor by the full name. // // This returns (nil, NotFound) if not found. @@ -273,6 +309,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // // This returns (nil, NotFound) if not found. +// This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { return nil, NotFound @@ -281,13 +318,19 @@ func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) globalMutex.RLock() defer globalMutex.RUnlock() } - if fd, ok := r.filesByPath[path]; ok { - return fd, nil + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) } - return nil, NotFound } -// NumFiles reports the number of registered files. +// NumFiles reports the number of registered files, +// including duplicate files with the same name. func (r *Files) NumFiles() int { if r == nil { return 0 @@ -296,10 +339,11 @@ func (r *Files) NumFiles() int { globalMutex.RLock() defer globalMutex.RUnlock() } - return len(r.filesByPath) + return r.numFiles } // RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. // The iteration order is undefined. func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { if r == nil { @@ -309,9 +353,11 @@ func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { globalMutex.RLock() defer globalMutex.RUnlock() } - for _, file := range r.filesByPath { - if !f(file) { - return + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } } } } @@ -560,13 +606,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp return nil, NotFound } -// FindMessageByName looks up a message by its full name. -// E.g., "google.protobuf.Any" +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". // -// This return (nil, NotFound) if not found. +// This returns (nil, NotFound) if not found. 
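+// If the name resolves to a registered type that is not a message
+// (for example, an enum or extension), an error other than NotFound is returned.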
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - // The full name by itself is a valid URL. - return r.FindMessageByURL(string(message)) + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound } // FindMessageByURL looks up a message by a URL identifier. @@ -574,6 +632,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // // This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. if r == nil { return nil, NotFound } @@ -613,6 +673,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E if xt, _ := v.(protoreflect.ExtensionType); xt != nil { return xt, nil } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. + if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) } return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67e..44cf467d8 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 824237856..abe4ab511 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -43,7 +43,6 @@ package descriptorpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -829,15 +828,6 @@ func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} } -var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
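A short sketch of the two lookup paths touched above: FindMessageByName now resolves the full name directly in typesByName instead of round-tripping through the URL parser, while FindMessageByURL keeps truncating everything up to and including '/'. It assumes google.protobuf.Duration is registered with the global registry, which importing durationpb guarantees:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        _ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
    )

    func main() {
        // Direct lookup by full name.
        byName, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        if err != nil {
            panic(err)
        }

        // URL lookup: the prefix before the final '/' is ignored.
        byURL, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Duration")
        if err != nil {
            panic(err)
        }

        fmt.Println(byName.Descriptor().FullName() == byURL.Descriptor().FullName()) // true
    }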
-func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ExtensionRangeOptions -} - func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1520,15 +1510,6 @@ func (*FileOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} } -var extRange_FileOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FileOptions -} - func (x *FileOptions) GetJavaPackage() string { if x != nil && x.JavaPackage != nil { return *x.JavaPackage @@ -1776,15 +1757,6 @@ func (*MessageOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} } -var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MessageOptions -} - func (x *MessageOptions) GetMessageSetWireFormat() bool { if x != nil && x.MessageSetWireFormat != nil { return *x.MessageSetWireFormat @@ -1930,15 +1902,6 @@ func (*FieldOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} } -var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FieldOptions -} - func (x *FieldOptions) GetCtype() FieldOptions_CType { if x != nil && x.Ctype != nil { return *x.Ctype @@ -2030,15 +1993,6 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } -var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_OneofOptions -} - func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2101,15 +2055,6 @@ func (*EnumOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} } -var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumOptions -} - func (x *EnumOptions) GetAllowAlias() bool { if x != nil && x.AllowAlias != nil { return *x.AllowAlias @@ -2183,15 +2128,6 @@ func (*EnumValueOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} } -var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
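The ExtensionRangeArray methods being deleted here (and for the remaining *Options types below) were already deprecated; the replacement named in those comments is the protoreflect descriptor. A hedged sketch of the equivalent lookup, using MessageOptions as an arbitrary example:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        opts := &descriptorpb.MessageOptions{}
        ranges := opts.ProtoReflect().Descriptor().ExtensionRanges()
        for i := 0; i < ranges.Len(); i++ {
            r := ranges.Get(i) // a [2]FieldNumber start/end pair
            fmt.Println("extension range:", r[0], r[1])
        }
    }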
-func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumValueOptions -} - func (x *EnumValueOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2258,15 +2194,6 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } -var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ServiceOptions -} - func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2335,15 +2262,6 @@ func (*MethodOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} } -var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MethodOptions -} - func (x *MethodOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -3557,16 +3475,15 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 
82a473e26..8c10797b9 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -166,10 +166,13 @@ import ( // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } // ... // foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// if err := any.UnmarshalTo(foo); err != nil { // ... // } // @@ -420,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index f7a110994..a583ca2f6 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -303,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 32a583df5..e7fcea31f 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -93,15 +93,15 @@ var file_google_protobuf_empty_proto_rawDesc = []byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, - 0x65, 0x6d, 0x70, 0x74, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 
0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c25e4bd7d..c9ae92132 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -134,7 +134,16 @@ import ( // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // -// Example 5: Compute Timestamp from current time in Python. +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() @@ -306,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/modules.txt b/vendor/modules.txt index b6805b182..0d0d2bdb0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,30 +1,27 @@ -# cloud.google.com/go v0.71.0 +# github.com/agext/levenshtein v1.2.3 ## explicit -# github.com/agext/levenshtein v1.2.2 github.com/agext/levenshtein -# github.com/apparentlymart/go-cidr v1.0.1 +# github.com/apparentlymart/go-cidr v1.1.0 github.com/apparentlymart/go-cidr/cidr -# 
github.com/apparentlymart/go-textseg v1.0.0 -github.com/apparentlymart/go-textseg/textseg # github.com/apparentlymart/go-textseg/v13 v13.0.0 github.com/apparentlymart/go-textseg/v13/textseg # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/fatih/color v1.7.0 +# github.com/fatih/color v1.13.0 github.com/fatih/color -# github.com/go-stack/stack v1.8.0 +# github.com/go-stack/stack v1.8.1 github.com/go-stack/stack -# github.com/golang/protobuf v1.4.3 +# github.com/golang/protobuf v1.5.2 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto -github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp -# github.com/golang/snappy v0.0.1 +# github.com/golang/snappy v0.0.4 github.com/golang/snappy -# github.com/google/go-cmp v0.5.6 +# github.com/google/go-cmp v0.5.8 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -32,7 +29,8 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/go-querystring v1.1.0 github.com/google/go-querystring/query -# github.com/hashicorp/errwrap v1.0.0 +# github.com/hashicorp/errwrap v1.1.0 +## explicit github.com/hashicorp/errwrap # github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-checkpoint @@ -45,18 +43,18 @@ github.com/hashicorp/go-cty/cty/gocty github.com/hashicorp/go-cty/cty/json github.com/hashicorp/go-cty/cty/msgpack github.com/hashicorp/go-cty/cty/set -# github.com/hashicorp/go-hclog v0.16.1 +# github.com/hashicorp/go-hclog v1.2.1 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.4.1 +# github.com/hashicorp/go-plugin v1.4.4 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin -# github.com/hashicorp/go-uuid v1.0.2 +# github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-uuid -# github.com/hashicorp/go-version v1.3.0 +# github.com/hashicorp/go-version v1.5.0 github.com/hashicorp/go-version -# github.com/hashicorp/hc-install v0.3.1 +# github.com/hashicorp/hc-install v0.3.2 github.com/hashicorp/hc-install github.com/hashicorp/hc-install/checkpoint github.com/hashicorp/hc-install/errors @@ -71,18 +69,19 @@ github.com/hashicorp/hc-install/internal/version github.com/hashicorp/hc-install/product github.com/hashicorp/hc-install/releases github.com/hashicorp/hc-install/src -# github.com/hashicorp/hcl/v2 v2.3.0 +# github.com/hashicorp/hcl/v2 v2.12.0 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode github.com/hashicorp/hcl/v2/hclsyntax # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils -# github.com/hashicorp/terraform-exec v0.15.0 +# github.com/hashicorp/terraform-exec v0.16.1 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -# github.com/hashicorp/terraform-json v0.13.0 +# github.com/hashicorp/terraform-json v0.14.0 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-go v0.5.0 +# github.com/hashicorp/terraform-plugin-go v0.9.1 +github.com/hashicorp/terraform-plugin-go/internal/logging github.com/hashicorp/terraform-plugin-go/tfprotov5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto 
github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 @@ -94,11 +93,13 @@ github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6 github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server github.com/hashicorp/terraform-plugin-go/tftypes -# github.com/hashicorp/terraform-plugin-log v0.2.0 +# github.com/hashicorp/terraform-plugin-log v0.4.1 +## explicit +github.com/hashicorp/terraform-plugin-log/internal/hclogutils github.com/hashicorp/terraform-plugin-log/internal/logging github.com/hashicorp/terraform-plugin-log/tflog github.com/hashicorp/terraform-plugin-log/tfsdklog -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0 ## explicit github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest @@ -111,6 +112,7 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode +github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest @@ -118,52 +120,65 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 +# github.com/hashicorp/terraform-registry-address v0.0.0-20220510144317-d78f4a47ae27 +## explicit github.com/hashicorp/terraform-registry-address # github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 github.com/hashicorp/terraform-svchost -# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d +# github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 +## explicit github.com/hashicorp/yamux - -# github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e +# github.com/heimweh/go-pagerduty v0.0.0-20220613181429-a35543968162 ## explicit github.com/heimweh/go-pagerduty/pagerduty -# github.com/klauspost/compress v1.11.2 +# github.com/klauspost/compress v1.15.6 +github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 -github.com/klauspost/compress/snappy +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/mattn/go-colorable v0.1.4 +# github.com/mattn/go-colorable v0.1.12 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.10 +# github.com/mattn/go-isatty v0.0.14 github.com/mattn/go-isatty # github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/copystructure # github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/go-testing-interface -# github.com/mitchellh/go-wordwrap v1.0.0 +# github.com/mitchellh/go-wordwrap v1.0.1 +## explicit github.com/mitchellh/go-wordwrap -# github.com/mitchellh/mapstructure v1.1.2 +# github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 github.com/mitchellh/reflectwalk -# 
github.com/oklog/run v1.0.0 +# github.com/oklog/run v1.1.0 +## explicit github.com/oklog/run # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/vmihailenco/msgpack v4.0.4+incompatible github.com/vmihailenco/msgpack github.com/vmihailenco/msgpack/codes +# github.com/vmihailenco/msgpack/v4 v4.3.12 +github.com/vmihailenco/msgpack/v4 +github.com/vmihailenco/msgpack/v4/codes +# github.com/vmihailenco/tagparser v0.1.2 +## explicit +github.com/vmihailenco/tagparser +github.com/vmihailenco/tagparser/internal +github.com/vmihailenco/tagparser/internal/parser # github.com/xdg-go/pbkdf2 v1.0.0 github.com/xdg-go/pbkdf2 -# github.com/xdg-go/scram v1.0.2 +# github.com/xdg-go/scram v1.1.1 github.com/xdg-go/scram -# github.com/xdg-go/stringprep v1.0.2 +# github.com/xdg-go/stringprep v1.0.3 github.com/xdg-go/stringprep -# github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d +# github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a github.com/youmark/pkcs8 -# github.com/zclconf/go-cty v1.9.1 +# github.com/zclconf/go-cty v1.10.0 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert github.com/zclconf/go-cty/cty/function @@ -171,7 +186,7 @@ github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/set -# go.mongodb.org/mongo-driver v1.7.2 +# go.mongodb.org/mongo-driver v1.9.1 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -180,6 +195,7 @@ go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/event go.mongodb.org/mongo-driver/internal +go.mongodb.org/mongo-driver/internal/randutil go.mongodb.org/mongo-driver/mongo go.mongodb.org/mongo-driver/mongo/address go.mongodb.org/mongo-driver/mongo/description @@ -205,14 +221,14 @@ go.mongodb.org/mongo-driver/x/mongo/driver/session go.mongodb.org/mongo-driver/x/mongo/driver/topology go.mongodb.org/mongo-driver/x/mongo/driver/uuid go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage -# golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e +# golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 -golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/internal/subtle golang.org/x/crypto/ocsp golang.org/x/crypto/openpgp @@ -222,10 +238,11 @@ golang.org/x/crypto/openpgp/errors golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 -golang.org/x/crypto/poly1305 +golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/net v0.0.0-20210326060303-6b1517762897 +# golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 +## explicit golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -233,23 +250,20 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 +# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/sync/errgroup -golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 +# golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c +## explicit golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader 
golang.org/x/sys/unix -# golang.org/x/text v0.3.5 +# golang.org/x/text v0.3.7 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd +# google.golang.org/appengine v1.6.7 ## explicit -# google.golang.org/api v0.35.0 -## explicit -# google.golang.org/appengine v1.6.6 google.golang.org/appengine google.golang.org/appengine/datastore google.golang.org/appengine/datastore/internal/cloudkey @@ -261,11 +275,10 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb +# google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad ## explicit google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.33.2 -## explicit +# google.golang.org/grpc v1.47.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -274,9 +287,11 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog @@ -284,6 +299,7 @@ google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -294,12 +310,17 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer @@ -310,29 +331,31 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.25.0 +# google.golang.org/protobuf v1.28.0 +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/fieldsort 
google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface
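The go_package rewrites in the descriptor, any, duration, empty and timestamp raw descriptors above (github.com/golang/protobuf/ptypes/* becoming google.golang.org/protobuf/types/known/*) and the refreshed anypb/timestamppb doc examples amount to the usage pattern below; a minimal sketch with made-up values, using only the new import paths:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // Pack a Duration into an Any, then unpack it again (anypb.New and
        // Any.UnmarshalTo replace the old ptypes.MarshalAny / ptypes.UnmarshalAny).
        d := durationpb.New(90 * time.Second)
        packed, err := anypb.New(d)
        if err != nil {
            panic(err)
        }
        var out durationpb.Duration
        if err := packed.UnmarshalTo(&out); err != nil {
            panic(err)
        }
        fmt.Println(out.AsDuration()) // 1m30s

        // Timestamps convert to and from time.Time.
        ts := timestamppb.New(time.Unix(1655700000, 0))
        fmt.Println(ts.AsTime().UTC(), timestamppb.Now().IsValid())
    }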